diff options
author | Koundinya Veluri <kouvel@users.noreply.github.com> | 2018-03-15 10:05:42 -0700 |
---|---|---|
committer | GitHub <noreply@github.com> | 2018-03-15 10:05:42 -0700 |
commit | 1c5d0281719f2aad04e6738b99c845b4f95c214a (patch) | |
tree | 66fdb672bfab2618abd670d5744e1ec3a32bedd9 /src/vm/amd64 | |
parent | 65d0df04b37ec2679e087d813597cc524b2465c7 (diff) | |
download | coreclr-1c5d0281719f2aad04e6738b99c845b4f95c214a.tar.gz coreclr-1c5d0281719f2aad04e6738b99c845b4f95c214a.tar.bz2 coreclr-1c5d0281719f2aad04e6738b99c845b4f95c214a.zip |
Fix to not reuse preallocated jump stubs for dynamic methods (#16941)
Fix to not reuse preallocated jump stubs for dynamic methods
Fixes https://github.com/dotnet/coreclr/issues/16940
- Allocate an extra jump stub per temporary entry points chunk that is shared by all precodes in the chunk. This jump stub always points to PrecodeFixupThunk.
- Use that for PrecodeFixupThunk, and use the precode-associated jump stub for pointing to the jitted code
- Considered allocating the extra jump stub only if coreclr is far away, but it involves reallocation which may be common in some environments/scenarios. Figured 12 extra bytes per dynamic type is not too significant.
Diffstat (limited to 'src/vm/amd64')
-rw-r--r-- | src/vm/amd64/cgenamd64.cpp | 11 |
-rw-r--r-- | src/vm/amd64/cgencpu.h | 2 |
2 files changed, 10 insertions(+), 3 deletions(-)
diff --git a/src/vm/amd64/cgenamd64.cpp b/src/vm/amd64/cgenamd64.cpp
index 56e3bfa738..6d11c7f0fa 100644
--- a/src/vm/amd64/cgenamd64.cpp
+++ b/src/vm/amd64/cgenamd64.cpp
@@ -689,7 +689,7 @@ INT32 rel32UsingJumpStub(INT32 UNALIGNED * pRel32, PCODE target, MethodDesc *pMe
     return static_cast<INT32>(offset);
 }
 
-INT32 rel32UsingPreallocatedJumpStub(INT32 UNALIGNED * pRel32, PCODE target, PCODE jumpStubAddr)
+INT32 rel32UsingPreallocatedJumpStub(INT32 UNALIGNED * pRel32, PCODE target, PCODE jumpStubAddr, bool emitJump)
 {
     CONTRACTL
     {
@@ -711,7 +711,14 @@ INT32 rel32UsingPreallocatedJumpStub(INT32 UNALIGNED * pRel32, PCODE target, PCO
             EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
         }
 
-        emitBackToBackJump((LPBYTE)jumpStubAddr, (LPVOID)target);
+        if (emitJump)
+        {
+            emitBackToBackJump((LPBYTE)jumpStubAddr, (LPVOID)target);
+        }
+        else
+        {
+            _ASSERTE(decodeBackToBackJump(jumpStubAddr) == target);
+        }
     }
 
     _ASSERTE(FitsInI4(offset));
diff --git a/src/vm/amd64/cgencpu.h b/src/vm/amd64/cgencpu.h
index 9136b168aa..ab049a49e2 100644
--- a/src/vm/amd64/cgencpu.h
+++ b/src/vm/amd64/cgencpu.h
@@ -383,7 +383,7 @@ INT32 rel32UsingJumpStub(INT32 UNALIGNED * pRel32, PCODE target, MethodDesc *pMe
                         LoaderAllocator *pLoaderAllocator = NULL, bool throwOnOutOfMemoryWithinRange = true);
 
 // Get Rel32 destination, emit jumpStub if necessary into a preallocated location
-INT32 rel32UsingPreallocatedJumpStub(INT32 UNALIGNED * pRel32, PCODE target, PCODE jumpStubAddr);
+INT32 rel32UsingPreallocatedJumpStub(INT32 UNALIGNED * pRel32, PCODE target, PCODE jumpStubAddr, bool emitJump);
 
 void emitCOMStubCall (ComCallMethodDesc *pCOMMethod, PCODE target);