Diffstat (limited to 'src')
-rw-r--r--  src/inc/gcinfotypes.h |   6
-rw-r--r--  src/vm/gccover.cpp    | 588
-rw-r--r--  src/vm/gccover.h      |   8
-rw-r--r--  src/vm/method.cpp     |  13
-rw-r--r--  src/vm/method.hpp     |   6
5 files changed, 375 insertions(+), 246 deletions(-)
diff --git a/src/inc/gcinfotypes.h b/src/inc/gcinfotypes.h
index 652ab9fbff..d45b9a6292 100644
--- a/src/inc/gcinfotypes.h
+++ b/src/inc/gcinfotypes.h
@@ -251,6 +251,12 @@ inline bool IsValidFieldReturnKind(ReturnKind returnKind)
return (returnKind == RT_Scalar || returnKind == RT_Object || returnKind == RT_ByRef);
}
+inline bool IsPointerFieldReturnKind(ReturnKind returnKind)
+{
+ _ASSERTE(IsValidFieldReturnKind(returnKind));
+ return (returnKind == RT_Object || returnKind == RT_ByRef);
+}
+
inline bool IsValidReturnRegister(size_t regNo)
{
return (regNo == 0)
diff --git a/src/vm/gccover.cpp b/src/vm/gccover.cpp
index be4aa36bd6..c5cbdb0c3c 100644
--- a/src/vm/gccover.cpp
+++ b/src/vm/gccover.cpp
@@ -36,8 +36,8 @@
/****************************************************************************/
MethodDesc* AsMethodDesc(size_t addr);
-static SLOT getTargetOfCall(SLOT instrPtr, PCONTEXT regs, SLOT*nextInstr);
-bool isCallToStopForGCJitHelper(SLOT instrPtr);
+static PBYTE getTargetOfCall(PBYTE instrPtr, PCONTEXT regs, PBYTE*nextInstr);
+bool isCallToStopForGCJitHelper(PBYTE instrPtr);
#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
static void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID codeStart);
static bool replaceInterruptibleRangesWithGcStressInstr (UINT32 startOffset, UINT32 stopOffset, LPVOID codeStart);
@@ -73,6 +73,94 @@ static MethodDesc* getTargetMethodDesc(PCODE target)
}
+bool IsGcCoverageInterruptInstruction(PBYTE instrPtr)
+{
+#if defined(_TARGET_ARM64_)
+ UINT32 instrVal = *reinterpret_cast<UINT32*>(instrPtr);
+ switch (instrVal)
+ {
+ case INTERRUPT_INSTR:
+ case INTERRUPT_INSTR_CALL:
+ case INTERRUPT_INSTR_PROTECT_RET:
+ return true;
+ default:
+ return false;
+ }
+#elif defined(_TARGET_ARM_)
+
+ size_t instrLen = GetARMInstructionLength(instrPtr);
+ if (instrLen == 2)
+ {
+ UINT16 instrVal = *reinterpret_cast<UINT16*>(instrPtr);
+ switch (instrVal)
+ {
+ case INTERRUPT_INSTR:
+ case INTERRUPT_INSTR_CALL:
+ case INTERRUPT_INSTR_PROTECT_RET:
+ return true;
+ default:
+ return false;
+ }
+ }
+ else
+ {
+ _ASSERTE(instrLen == 4);
+ UINT32 instrVal = *reinterpret_cast<UINT32*>(instrPtr);
+ switch (instrVal)
+ {
+ case INTERRUPT_INSTR_32:
+ case INTERRUPT_INSTR_CALL_32:
+ case INTERRUPT_INSTR_PROTECT_RET_32:
+ return true;
+ default:
+ return false;
+ }
+ }
+#else // x64 and x86
+ UINT8 instrVal = *reinterpret_cast<UINT8*>(instrPtr);
+ switch (instrVal)
+ {
+ case INTERRUPT_INSTR:
+ case INTERRUPT_INSTR_CALL:
+ case INTERRUPT_INSTR_PROTECT_FIRST_RET:
+ case INTERRUPT_INSTR_PROTECT_SECOND_RET:
+ case INTERRUPT_INSTR_PROTECT_BOTH_RET:
+ return true;
+ default:
+ return false;
+ }
+#endif
+}
+
+bool IsOriginalInstruction(PBYTE instrPtr, GCCoverageInfo* gcCover, DWORD offset)
+{
+#if defined(_TARGET_ARM64_)
+ UINT32 instrVal = *reinterpret_cast<UINT32*>(instrPtr);
+ UINT32 origInstrVal = *reinterpret_cast<UINT32*>(gcCover->savedCode + offset);
+ return (instrVal == origInstrVal);
+#elif defined(_TARGET_ARM_)
+ size_t instrLen = GetARMInstructionLength(instrPtr);
+ if (instrLen == 2)
+ {
+ UINT16 instrVal = *reinterpret_cast<UINT16*>(instrPtr);
+ UINT16 origInstrVal = *reinterpret_cast<UINT16*>(gcCover->savedCode + offset);
+ return (instrVal == origInstrVal);
+ }
+ else
+ {
+ _ASSERTE(instrLen == 4);
+ UINT32 instrVal = *reinterpret_cast<UINT32*>(instrPtr);
+ UINT32 origInstrVal = *reinterpret_cast<UINT32*>(gcCover->savedCode + offset);
+ return (instrVal == origInstrVal);
+ }
+#else // x64 and x86
+ UINT8 instrVal = *reinterpret_cast<UINT8*>(instrPtr);
+ UINT8 origInstrVal = gcCover->savedCode[offset];
+ return (instrVal == origInstrVal);
+#endif
+}
+
+
void SetupAndSprinkleBreakpoints(
MethodDesc * pMD,
EECodeInfo * pCodeInfo,
@@ -250,6 +338,94 @@ void SetupGcCoverageForNativeImage(Module* module)
}
#endif
+
+void ReplaceInstrAfterCall(PBYTE instrToReplace, MethodDesc* callMD)
+{
+ ReturnKind returnKind = callMD->GetReturnKind(true);
+ if (!IsValidReturnKind(returnKind))
+ {
+#if defined(_TARGET_AMD64_) && defined(PLATFORM_UNIX)
+ _ASSERTE(!"Unexpected return kind for x64 Unix.");
+#else
+ // Skip GC coverage after the call.
+ return;
+#endif
+ }
+ _ASSERTE(IsValidReturnKind(returnKind));
+
+ bool ispointerKind = IsPointerReturnKind(returnKind);
+#ifdef _TARGET_ARM_
+ size_t instrLen = GetARMInstructionLength(instrToReplace);
+ bool protectReturn = ispointerKind;
+ if (protectReturn)
+ if (instrLen == 2)
+ *(WORD*)instrToReplace = INTERRUPT_INSTR_PROTECT_RET;
+ else
+ *(DWORD*)instrToReplace = INTERRUPT_INSTR_PROTECT_RET_32;
+ else
+ if (instrLen == 2)
+ *(WORD*)instrToReplace = INTERRUPT_INSTR;
+ else
+ *(DWORD*)instrToReplace = INTERRUPT_INSTR_32;
+#elif defined(_TARGET_ARM64_)
+ bool protectReturn = ispointerKind;
+ if (protectReturn)
+ *(DWORD*)instrToReplace = INTERRUPT_INSTR_PROTECT_RET;
+ else
+ *(DWORD*)instrToReplace = INTERRUPT_INSTR;
+#elif defined(_TARGET_AMD64_) || defined(_TARGET_X86_)
+
+
+ if (ispointerKind)
+ {
+ bool protectRegister[2] = { false, false };
+
+ bool moreRegisters = false;
+
+ ReturnKind fieldKind1 = ExtractRegReturnKind(returnKind, 0, moreRegisters);
+ if (IsPointerFieldReturnKind(fieldKind1))
+ {
+ protectRegister[0] = true;
+ }
+ if (moreRegisters)
+ {
+ ReturnKind fieldKind2 = ExtractRegReturnKind(returnKind, 1, moreRegisters);
+ if (IsPointerFieldReturnKind(fieldKind2))
+ {
+ protectRegister[1] = true;
+ }
+ }
+ _ASSERTE(!moreRegisters);
+
+ if (protectRegister[0] && !protectRegister[1])
+ {
+ *instrToReplace = INTERRUPT_INSTR_PROTECT_FIRST_RET;
+ }
+ else
+ {
+#if !defined(_TARGET_AMD64_) || !defined(PLATFORM_UNIX)
+ _ASSERTE(!"Not expected multi reg return with pointers.");
+#endif // !_TARGET_AMD64_ || !PLATFORM_UNIX
+ if (!protectRegister[0] && protectRegister[1])
+ {
+ *instrToReplace = INTERRUPT_INSTR_PROTECT_SECOND_RET;
+ }
+ else
+ {
+ _ASSERTE(protectRegister[0] && protectRegister[1]);
+ *instrToReplace = INTERRUPT_INSTR_PROTECT_BOTH_RET;
+ }
+ }
+ }
+ else
+ {
+ *instrToReplace = INTERRUPT_INSTR;
+ }
+#else
+ _ASSERTE(!"not implemented for platform");
+#endif
+}
+
#ifdef _TARGET_AMD64_
class GCCoverageRangeEnumerator
@@ -335,9 +511,9 @@ public:
#endif // _TARGET_AMD64_
-// When Sprinking break points, we must make sure that certain calls to
-// Thread-suspension routines inlined into the managed method are not
-// converted to GC-Stress points. Otherwise, this will lead to race
+// When sprinkling break points, we must make sure that certain calls to
+// Thread-suspension routines inlined into the managed method are not
+// converted to GC-Stress points. Otherwise, this will lead to race
// conditions with the GC.
//
// For example, for an inlined PInvoke stub, the JIT generates the following code
@@ -391,9 +567,9 @@ public:
extern "C" FCDECL0(VOID, JIT_RareDisableHelper);
/****************************************************************************/
-/* sprinkle interupt instructions that will stop on every GCSafe location
+/* sprinkle interrupt instructions that will stop on every GCSafe location
regionOffsetAdj - Represents the offset of the current region
- from the beginning of the method (is 0 for hot region)
+ from the beginning of the method (is 0 for hot region)
*/
void GCCoverageInfo::SprinkleBreakpoints(
@@ -416,7 +592,7 @@ void GCCoverageInfo::SprinkleBreakpoints(
ClrVirtualProtect(codeStart, codeSize, PAGE_EXECUTE_READWRITE, &oldProtect);
}
- SLOT cur;
+ PBYTE cur;
BYTE* codeEnd = codeStart + codeSize;
EECodeInfo codeInfo((PCODE)codeStart);
@@ -471,7 +647,7 @@ void GCCoverageInfo::SprinkleBreakpoints(
// for code. It uses some for switch tables. Because the first few offsets
// may be decodable as instructions, we can't reason about where we should
// encounter invalid instructions. However, we do not want to silently skip
- // large chunks of methods just becuase the JIT started emitting a new
+ // large chunks of methods just because the JIT started emitting a new
// instruction, so only assume it is a switch table if we've seen the switch
// code (an indirect unconditional jump)
if ((len == 0) && fSawPossibleSwitch)
@@ -487,6 +663,8 @@ void GCCoverageInfo::SprinkleBreakpoints(
_ASSERTE(len > 0);
_ASSERTE(len <= (size_t)(codeEnd-cur));
+ bool skipGCForCurrentInstr = false;
+
switch(instructionType)
{
case InstructionType::Call_IndirectUnconditional:
@@ -505,14 +683,16 @@ void GCCoverageInfo::SprinkleBreakpoints(
if(safePointDecoder.IsSafePoint((UINT32)(cur + len - codeStart + regionOffsetAdj)))
#endif
{
- SLOT nextInstr;
- SLOT target = getTargetOfCall(cur, NULL, &nextInstr);
+ PBYTE nextInstr;
+ PBYTE target = getTargetOfCall(cur, NULL, &nextInstr);
if (target != 0)
{
- // JIT_RareDisableHelper() is expected to be an indirect call.
- // If we encounter a direct call (in future), skip the call
- _ASSERTE(target != (SLOT)JIT_RareDisableHelper);
+ if (target == (PBYTE)JIT_RareDisableHelper)
+ {
+ // Skip the call to JIT_RareDisableHelper.
+ skipGCForCurrentInstr = true;
+ }
targetMD = getTargetMethodDesc((PCODE)target);
}
}
@@ -530,30 +710,31 @@ void GCCoverageInfo::SprinkleBreakpoints(
break;
}
- if (prevDirectCallTargetMD != 0)
+ if (!skipGCForCurrentInstr)
{
- ReturnKind returnKind = prevDirectCallTargetMD->GetReturnKind(true);
- if (IsPointerReturnKind(returnKind))
- *cur = INTERRUPT_INSTR_PROTECT_RET;
- else
- *cur = INTERRUPT_INSTR;
- }
+ if (prevDirectCallTargetMD != 0)
+ {
+ ReplaceInstrAfterCall(cur, prevDirectCallTargetMD);
+ }
- // For fully interruptible code, we end up whacking every instruction
- // to INTERRUPT_INSTR. For non-fully interruptible code, we end
- // up only touching the call instructions (specially so that we
- // can really do the GC on the instruction just after the call).
- _ASSERTE(FitsIn<DWORD>((cur - codeStart) + regionOffsetAdj));
- if (codeMan->IsGcSafe(&codeInfo, static_cast<DWORD>((cur - codeStart) + regionOffsetAdj)))
- *cur = INTERRUPT_INSTR;
+ // For fully interruptible code, we end up whacking every instruction
+ // to INTERRUPT_INSTR. For non-fully interruptible code, we end
+ // up only touching the call instructions (specially so that we
+ // can really do the GC on the instruction just after the call).
+ size_t dwRelOffset = (cur - codeStart) + regionOffsetAdj;
+ _ASSERTE(FitsIn<DWORD>(dwRelOffset));
+ if (codeMan->IsGcSafe(&codeInfo, static_cast<DWORD>(dwRelOffset)))
+ *cur = INTERRUPT_INSTR;
#ifdef _TARGET_X86_
- // we will whack every instruction in the prolog and epilog to make certain
- // our unwinding logic works there.
- if (codeMan->IsInPrologOrEpilog((cur - codeStart) + (DWORD)regionOffsetAdj, gcInfoToken, NULL)) {
- *cur = INTERRUPT_INSTR;
- }
+ // we will whack every instruction in the prolog and epilog to make certain
+ // our unwinding logic works there.
+ if (codeMan->IsInPrologOrEpilog((cur - codeStart) + (DWORD)regionOffsetAdj, gcInfoToken, NULL))
+ {
+ *cur = INTERRUPT_INSTR;
+ }
#endif
+ }
// If we couldn't find the method desc targetMD is zero
prevDirectCallTargetMD = targetMD;
@@ -561,7 +742,7 @@ void GCCoverageInfo::SprinkleBreakpoints(
cur += len;
#ifdef _TARGET_AMD64_
- SLOT newCur = rangeEnum.EnsureInRange(cur);
+ PBYTE newCur = rangeEnum.EnsureInRange(cur);
if(newCur != cur)
{
prevDirectCallTargetMD = NULL;
@@ -573,7 +754,7 @@ void GCCoverageInfo::SprinkleBreakpoints(
// If we are not able to place an interrupt at the first instruction, this means that
// we are partially interruptible with no prolog. Just don't bother to do the
- // the epilog checks, since the epilog will be trival (a single return instr)
+ // the epilog checks, since the epilog will be trivial (a single return instr)
assert(codeSize > 0);
if ((regionOffsetAdj==0) && (*codeStart != INTERRUPT_INSTR))
doingEpilogChecks = false;
@@ -647,7 +828,7 @@ void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID
return;
}
- SLOT instrPtr = (BYTE*)PCODEToPINSTR(pCode);
+ PBYTE instrPtr = (BYTE*)PCODEToPINSTR(pCode);
// For code sequences of the type
// BL func1
@@ -657,7 +838,7 @@ void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID
// However as the first safe point is already replaced with gcstress instruction, decoding of the call
// instruction will fail when processing for the 2nd safe point. Therefore saved instruction must be used instead of
// instrPtr for decoding the call instruction.
- SLOT savedInstrPtr = ((GCCoverageInfo*)pGCCover)->savedCode + safePointOffset;
+ PBYTE savedInstrPtr = ((GCCoverageInfo*)pGCCover)->savedCode + safePointOffset;
//Determine if instruction before the safe point is call using immediate (BLX Imm) or call by register (BLX Rm)
BOOL instructionIsACallThroughRegister = FALSE;
@@ -707,7 +888,7 @@ void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID
if(instructionIsACallThroughRegister)
{
// If it is call by register then cannot know MethodDesc so replace the call instruction with illegal instruction
- // safe point will be replaced with appropiate illegal instruction at execution time when reg value is known
+ // safe point will be replaced with appropriate illegal instruction at execution time when reg value is known
#if defined(_TARGET_ARM_)
*((WORD*)instrPtr - 1) = INTERRUPT_INSTR_CALL;
#elif defined(_TARGET_ARM64_)
@@ -717,8 +898,8 @@ void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID
else if(instructionIsACallThroughImmediate)
{
// If it is call by immediate then find the methodDesc
- SLOT nextInstr;
- SLOT target = getTargetOfCall((SLOT)((WORD*)savedInstrPtr-2), NULL, &nextInstr);
+ PBYTE nextInstr;
+ PBYTE target = getTargetOfCall((PBYTE)((WORD*)savedInstrPtr-2), NULL, &nextInstr);
if (target != 0)
{
@@ -733,43 +914,24 @@ void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID
{
// The instruction about to be replaced cannot already be a gcstress instruction
-#if defined(_TARGET_ARM_)
- size_t instrLen = GetARMInstructionLength(instrPtr);
- if (instrLen == 2)
- {
- _ASSERTE(*((WORD*)instrPtr) != INTERRUPT_INSTR &&
- *((WORD*)instrPtr) != INTERRUPT_INSTR_CALL &&
- *((WORD*)instrPtr) != INTERRUPT_INSTR_PROTECT_RET);
- }
- else
- {
- _ASSERTE(*((DWORD*)instrPtr) != INTERRUPT_INSTR_32 &&
- *((DWORD*)instrPtr) != INTERRUPT_INSTR_CALL_32 &&
- *((DWORD*)instrPtr) != INTERRUPT_INSTR_PROTECT_RET_32);
- }
-#elif defined(_TARGET_ARM64_)
- {
- _ASSERTE(*((DWORD*)instrPtr) != INTERRUPT_INSTR &&
- *((DWORD*)instrPtr) != INTERRUPT_INSTR_CALL &&
- *((DWORD*)instrPtr) != INTERRUPT_INSTR_PROTECT_RET);
- }
-#endif
+ _ASSERTE(!IsGcCoverageInterruptInstruction(instrPtr));
+
//
// When applying GC coverage breakpoints at native image load time, the code here runs
// before eager fixups are applied for the module being loaded. The direct call target
// never requires restore, however it is possible that it is initially in an invalid state
// and remains invalid until one or more eager fixups are applied.
//
- // GetReturnKind consults the method signature, meaning it consults the
+ // ReplaceInstrAfterCall consults the method signature, meaning it consults the
// metadata in the owning module. For generic instantiations stored in non-preferred
// modules, reaching the owning module requires following the module override pointer for
// the enclosing MethodTable. In this case, the module override pointer is generally
// invalid until an associated eager fixup is applied.
//
- // In situations like this, GetReturnKind will try to dereference an
+ // In situations like this, ReplaceInstrAfterCall will try to dereference an
// unresolved fixup and will AV.
//
- // Given all of this, skip the GetReturnKind call by default to avoid
+ // Given all of this, skip the ReplaceInstrAfterCall call by default to avoid
// unexpected AVs. This implies leaving out the GC coverage breakpoints for direct calls
// unless COMPlus_GcStressOnDirectCalls=1 is explicitly set in the environment.
//
@@ -778,33 +940,7 @@ void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID
if (fGcStressOnDirectCalls.val(CLRConfig::INTERNAL_GcStressOnDirectCalls))
{
- ReturnKind returnKind = targetMD->GetReturnKind(true);
-
- // If the method returns an object then should protect the return object
- if (IsPointerReturnKind(returnKind))
- {
- // replace with corresponding 2 or 4 byte illegal instruction (which roots the return value)
-#if defined(_TARGET_ARM_)
- if (instrLen == 2)
- *((WORD*)instrPtr) = INTERRUPT_INSTR_PROTECT_RET;
- else
- *((DWORD*)instrPtr) = INTERRUPT_INSTR_PROTECT_RET_32;
-#elif defined(_TARGET_ARM64_)
- *((DWORD*)instrPtr) = INTERRUPT_INSTR_PROTECT_RET;
-#endif
- }
- else // method does not return an objectref
- {
- // replace with corresponding 2 or 4 byte illegal instruction
-#if defined(_TARGET_ARM_)
- if (instrLen == 2)
- *((WORD*)instrPtr) = INTERRUPT_INSTR;
- else
- *((DWORD*)instrPtr) = INTERRUPT_INSTR_32;
-#elif defined(_TARGET_ARM64_)
- *((DWORD*)instrPtr) = INTERRUPT_INSTR;
-#endif
- }
+ ReplaceInstrAfterCall(instrPtr, targetMD);
}
}
}
@@ -816,10 +952,10 @@ void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID
bool replaceInterruptibleRangesWithGcStressInstr (UINT32 startOffset, UINT32 stopOffset, LPVOID pGCCover)
{
PCODE pCode = NULL;
- SLOT rangeStart = NULL;
- SLOT rangeStop = NULL;
+ PBYTE rangeStart = NULL;
+ PBYTE rangeStop = NULL;
- //Interruptible range can span accross hot & cold region
+ //Interruptible range can span across hot & cold region
int acrossHotRegion = 1; // 1 means range is not across end of hot region & 2 is when it is across end of hot region
//Find the code addresses from offsets
@@ -858,25 +994,14 @@ bool replaceInterruptibleRangesWithGcStressInstr (UINT32 startOffset, UINT32 sto
// Need to do two iterations if interruptible range spans across hot & cold region
while(acrossHotRegion--)
{
- SLOT instrPtr = rangeStart;
+ PBYTE instrPtr = rangeStart;
while(instrPtr < rangeStop)
{
// The instruction about to be replaced cannot already be a gcstress instruction
+ _ASSERTE(!IsGcCoverageInterruptInstruction(instrPtr));
#if defined(_TARGET_ARM_)
size_t instrLen = GetARMInstructionLength(instrPtr);
- if (instrLen == 2)
- {
- _ASSERTE(*((WORD*)instrPtr) != INTERRUPT_INSTR &&
- *((WORD*)instrPtr) != INTERRUPT_INSTR_CALL &&
- *((WORD*)instrPtr) != INTERRUPT_INSTR_PROTECT_RET);
- }
- else
- {
- _ASSERTE(*((DWORD*)instrPtr) != INTERRUPT_INSTR_32 &&
- *((DWORD*)instrPtr) != INTERRUPT_INSTR_CALL_32 &&
- *((DWORD*)instrPtr) != INTERRUPT_INSTR_PROTECT_RET_32);
- }
if (instrLen == 2)
*((WORD*)instrPtr) = INTERRUPT_INSTR;
@@ -889,12 +1014,6 @@ bool replaceInterruptibleRangesWithGcStressInstr (UINT32 startOffset, UINT32 sto
instrPtr += instrLen;
#elif defined(_TARGET_ARM64_)
- {
- _ASSERTE(*((DWORD*)instrPtr) != INTERRUPT_INSTR &&
- *((DWORD*)instrPtr) != INTERRUPT_INSTR_CALL &&
- *((DWORD*)instrPtr) != INTERRUPT_INSTR_PROTECT_RET);
- }
-
// Do not replace with gcstress interrupt instruction at call to JIT_RareDisableHelper
if(!isCallToStopForGCJitHelper(instrPtr))
*((DWORD*)instrPtr) = INTERRUPT_INSTR;
@@ -926,7 +1045,7 @@ bool replaceInterruptibleRangesWithGcStressInstr (UINT32 startOffset, UINT32 sto
// For other architecture we detect call to JIT_RareDisableHelper
// in function OnGcCoverageInterrupt() since getTargetOfCall() can
// get the actual jithelper target.
-bool isCallToStopForGCJitHelper(SLOT instrPtr)
+bool isCallToStopForGCJitHelper(PBYTE instrPtr)
{
#if defined(_TARGET_ARM64_)
if (((*reinterpret_cast<DWORD*>(instrPtr)) & 0xFC000000) == 0x94000000) // Do we have a BL instruction?
@@ -973,7 +1092,7 @@ static size_t getRegVal(unsigned regNum, PCONTEXT regs)
}
/****************************************************************************/
-static SLOT getTargetOfCall(SLOT instrPtr, PCONTEXT regs, SLOT*nextInstr) {
+static PBYTE getTargetOfCall(PBYTE instrPtr, PCONTEXT regs, PBYTE* nextInstr) {
BYTE sibindexadj = 0;
BYTE baseadj = 0;
@@ -984,12 +1103,12 @@ static SLOT getTargetOfCall(SLOT instrPtr, PCONTEXT regs, SLOT*nextInstr) {
// When decoding the instructions of a method which is sprinkled with
// TRAP instructions for GCStress, we decode the bytes from a copy
// of the instructions stored before the traps-for-gc were inserted.
- // Hoiwever, the PC-relative addressing/displacement of the CALL-target
+ // However, the PC-relative addressing/displacement of the CALL-target
// will still be with respect to the currently executing PC.
// So, if a register context is available, we pick the PC from it
// (for address calculation purposes only).
- SLOT PC = (regs) ? (SLOT)GetIP(regs) : instrPtr;
+ PBYTE PC = (regs) ? (PBYTE)GetIP(regs) : instrPtr;
#ifdef _TARGET_ARM_
if((instrPtr[1] & 0xf0) == 0xf0) // direct call
@@ -1061,7 +1180,7 @@ static SLOT getTargetOfCall(SLOT instrPtr, PCONTEXT regs, SLOT*nextInstr) {
// Note that the signed displacement is sign-extended
// to 64-bit on AMD64
- return((SLOT)(base + (SSIZE_T)displacement));
+ return((PBYTE)(base + (SSIZE_T)displacement));
}
if (instrPtr[0] == 0xFF) { // Indirect Absolute Near
@@ -1070,7 +1189,7 @@ static SLOT getTargetOfCall(SLOT instrPtr, PCONTEXT regs, SLOT*nextInstr) {
BYTE mod = (instrPtr[1] & 0xC0) >> 6;
BYTE rm = (instrPtr[1] & 0x7);
- SLOT result;
+ PBYTE result;
switch (mod) {
case 0:
@@ -1145,7 +1264,7 @@ static SLOT getTargetOfCall(SLOT instrPtr, PCONTEXT regs, SLOT*nextInstr) {
result = 0;
#endif // _TARGET_AMD64_
} else {
- result = (SLOT)getRegVal(baseadj + rm, regs);
+ result = (PBYTE)getRegVal(baseadj + rm, regs);
}
if (mod == 0) {
@@ -1174,14 +1293,14 @@ static SLOT getTargetOfCall(SLOT instrPtr, PCONTEXT regs, SLOT*nextInstr) {
//
// Now dereference thru the result to get the resulting IP.
//
- result = (SLOT)(*((SLOT *)result));
+ result = (PBYTE)(*((PBYTE *)result));
break;
case 3:
default:
- result = (SLOT)getRegVal(baseadj + rm, regs);
+ result = (PBYTE)getRegVal(baseadj + rm, regs);
displace += 2;
break;
@@ -1237,25 +1356,19 @@ bool IsGcCoverageInterrupt(LPVOID ip)
return false;
}
- // Now it's safe to dereference the IP to check the instruction
-#if defined(_TARGET_ARM64_)
- UINT32 instructionCode = *reinterpret_cast<UINT32 *>(ip);
-#elif defined(_TARGET_ARM_)
- UINT16 instructionCode = *reinterpret_cast<UINT16 *>(ip);
-#else
- UINT8 instructionCode = *reinterpret_cast<UINT8 *>(ip);
-#endif
- switch (instructionCode)
+ PBYTE instrPtr = reinterpret_cast<PBYTE>(ip);
+
+ if (IsGcCoverageInterruptInstruction(instrPtr))
{
- case INTERRUPT_INSTR:
- case INTERRUPT_INSTR_CALL:
- case INTERRUPT_INSTR_PROTECT_RET:
- return true;
+ return true;
+ }
- default:
- // Another thread may have already changed the code back to the original
- return instructionCode == gcCover->savedCode[codeInfo.GetRelOffset()];
+ if (IsOriginalInstruction(instrPtr, gcCover, codeInfo.GetRelOffset()))
+ {
+ // Another thread may have already changed the code back to the original.
+ return true;
}
+ return false;
}
// Remove the GcCoverage interrupt instruction, and restore the
@@ -1309,10 +1422,10 @@ BOOL OnGcCoverageInterrupt(PCONTEXT regs)
// If this trap instruction is taken in place of CORINFO_HELP_STOP_FOR_GC()
// Do not start a GC, but continue with the original instruction.
// See the comments above SprinkleBreakpoints() function.
- SLOT nextInstr;
- SLOT target = getTargetOfCall(savedInstrPtr, regs, &nextInstr);
+ PBYTE nextInstr;
+ PBYTE target = getTargetOfCall(savedInstrPtr, regs, &nextInstr);
- if (target == (SLOT)JIT_RareDisableHelper) {
+ if (target == (PBYTE)JIT_RareDisableHelper) {
RemoveGcCoverageInterrupt(instrPtr, savedInstrPtr);
return TRUE;
}
@@ -1354,7 +1467,7 @@ FORCEINLINE void UpdateGCStressInstructionWithoutGC ()
void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
{
PCODE controlPc = GetIP(regs);
- TADDR instrPtr = PCODEToPINSTR(controlPc);
+ PBYTE instrPtr = reinterpret_cast<PBYTE>(PCODEToPINSTR(controlPc));
if (!pMD)
{
@@ -1371,42 +1484,51 @@ void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
Thread *pThread = GetThread();
-#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
- BYTE instrVal = *(BYTE *)instrPtr;
- forceStack[6] = &instrVal; // This is so I can see it fastchecked
-
- if (instrVal != INTERRUPT_INSTR &&
- instrVal != INTERRUPT_INSTR_CALL &&
- instrVal != INTERRUPT_INSTR_PROTECT_RET) {
- _ASSERTE(instrVal == gcCover->savedCode[offset]); // someone beat us to it.
- return; // Someone beat us to it, just go on running
+ if (!IsGcCoverageInterruptInstruction(instrPtr))
+ {
+ // This assert can fail if another thread changed the original instruction to
+ // a GC coverage interrupt instruction between these two checks. Uncomment it
+ // when the threading issue gets resolved.
+ // _ASSERTE(IsOriginalInstruction(instrPtr, gcCover, offset));
+
+ // Someone beat us to it, just go on running.
+ return;
}
- bool atCall = (instrVal == INTERRUPT_INSTR_CALL);
- bool afterCallProtect = (instrVal == INTERRUPT_INSTR_PROTECT_RET);
+ bool atCall;
+ bool afterCallProtect[2] = { false, false };
-#elif defined(_TARGET_ARM_)
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
- WORD instrVal = *(WORD*)instrPtr;
+ BYTE instrVal = *instrPtr;
forceStack[6] = &instrVal; // This is so I can see it fastchecked
- size_t instrLen = GetARMInstructionLength(instrVal);
+ atCall = (instrVal == INTERRUPT_INSTR_CALL);
- bool atCall;
- bool afterCallProtect;
+ if (instrVal == INTERRUPT_INSTR_PROTECT_BOTH_RET)
+ {
+ afterCallProtect[0] = afterCallProtect[1] = true;
+ }
+ else if (instrVal == INTERRUPT_INSTR_PROTECT_FIRST_RET)
+ {
+ afterCallProtect[0] = true;
+ }
+ else if (instrVal == INTERRUPT_INSTR_PROTECT_SECOND_RET)
+ {
+ afterCallProtect[1] = true;
+ }
+
+#elif defined(_TARGET_ARM_)
+ forceStack[6] = (WORD*)instrPtr; // This is so I can see it fastchecked
+
+ size_t instrLen = GetARMInstructionLength(instrPtr);
if (instrLen == 2)
{
- if (instrVal != INTERRUPT_INSTR &&
- instrVal != INTERRUPT_INSTR_CALL &&
- instrVal != INTERRUPT_INSTR_PROTECT_RET) {
- _ASSERTE(instrVal == *(WORD*)(gcCover->savedCode + offset)); // someone beat us to it.
- return; // Someone beat us to it, just go on running
- }
-
+ WORD instrVal = *(WORD*)instrPtr;
atCall = (instrVal == INTERRUPT_INSTR_CALL);
- afterCallProtect = (instrVal == INTERRUPT_INSTR_PROTECT_RET);
+ afterCallProtect[0] = (instrVal == INTERRUPT_INSTR_PROTECT_RET);
}
else
{
@@ -1414,29 +1536,15 @@ void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
DWORD instrVal32 = *(DWORD*)instrPtr;
- if (instrVal32 != INTERRUPT_INSTR_32 &&
- instrVal32 != INTERRUPT_INSTR_CALL_32 &&
- instrVal32 != INTERRUPT_INSTR_PROTECT_RET_32) {
- _ASSERTE(instrVal32 == *(DWORD*)(gcCover->savedCode + offset)); // someone beat us to it.
- return; // Someone beat us to it, just go on running
- }
-
atCall = (instrVal32 == INTERRUPT_INSTR_CALL_32);
- afterCallProtect = (instrVal32 == INTERRUPT_INSTR_PROTECT_RET_32);
+ afterCallProtect[0] = (instrVal32 == INTERRUPT_INSTR_PROTECT_RET_32);
}
#elif defined(_TARGET_ARM64_)
DWORD instrVal = *(DWORD *)instrPtr;
forceStack[6] = &instrVal; // This is so I can see it fastchecked
- if (instrVal != INTERRUPT_INSTR &&
- instrVal != INTERRUPT_INSTR_CALL &&
- instrVal != INTERRUPT_INSTR_PROTECT_RET) {
- _ASSERTE(instrVal == *(DWORD *)(gcCover->savedCode + offset)); // someone beat us to it.
- return; // Someone beat us to it, just go on running
- }
-
- bool atCall = (instrVal == INTERRUPT_INSTR_CALL);
- bool afterCallProtect = (instrVal == INTERRUPT_INSTR_PROTECT_RET);
+ atCall = (instrVal == INTERRUPT_INSTR_CALL);
+ afterCallProtect[0] = (instrVal == INTERRUPT_INSTR_PROTECT_RET);
#endif // _TARGET_*
@@ -1462,7 +1570,7 @@ void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
}
// If some other thread removes interrupt points, we abandon epilog testing
- // for this routine since the barrier at the begining of the routine may not
+ // for this routine since the barrier at the beginning of the routine may not
// be up anymore, and thus the caller context is now not guaranteed to be correct.
// This should happen only very rarely so is not a big deal.
if (gcCover->callerThread != pThread)
@@ -1550,7 +1658,7 @@ void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) || defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
/* In non-fully interrruptable code, if the EIP is just after a call instr
- means something different because it expects that that we are IN the
+ means something different because it expects that we are IN the
called method, not actually at the instruction just after the call. This
is important, because until the called method returns, IT is responsible
for protecting the return value. Thus just after a call instruction
@@ -1584,13 +1692,13 @@ void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
// but it's not been a problem so far.
// see details about <GCStress instruction update race> in comments above
pThread->CommitGCStressInstructionUpdate ();
- BYTE* nextInstr;
- SLOT target = getTargetOfCall((BYTE*) instrPtr, regs, (BYTE**)&nextInstr);
+ PBYTE nextInstr;
+ PBYTE target = getTargetOfCall((BYTE*) instrPtr, regs, (BYTE**)&nextInstr);
if (target != 0)
{
if (!pThread->PreemptiveGCDisabled())
{
- // We are in preemtive mode in JITTed code. This implies that we are into IL stub
+ // We are in preemptive mode in JITTed code. This implies that we are into IL stub
// close to PINVOKE method. This call will never return objectrefs.
#ifdef _TARGET_ARM_
size_t instrLen = GetARMInstructionLength(nextInstr);
@@ -1614,32 +1722,7 @@ void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
// It could become a problem if 64bit does partially interrupt work.
// OK, we have the MD, mark the instruction after the CALL
// appropriately
-
- ReturnKind returnKind = targetMD->GetReturnKind(true);
- bool protectReturn = IsPointerReturnKind(returnKind);
-#ifdef _TARGET_ARM_
- size_t instrLen = GetARMInstructionLength(nextInstr);
- if (protectReturn)
- if (instrLen == 2)
- *(WORD*)nextInstr = INTERRUPT_INSTR_PROTECT_RET;
- else
- *(DWORD*)nextInstr = INTERRUPT_INSTR_PROTECT_RET_32;
- else
- if (instrLen == 2)
- *(WORD*)nextInstr = INTERRUPT_INSTR;
- else
- *(DWORD*)nextInstr = INTERRUPT_INSTR_32;
-#elif defined(_TARGET_ARM64_)
- if (protectReturn)
- *(DWORD *)nextInstr = INTERRUPT_INSTR_PROTECT_RET;
- else
- *(DWORD *)nextInstr = INTERRUPT_INSTR;
-#else
- if (protectReturn)
- *nextInstr = INTERRUPT_INSTR_PROTECT_RET;
- else
- *nextInstr = INTERRUPT_INSTR;
-#endif
+ ReplaceInstrAfterCall(nextInstr, targetMD);
}
}
}
@@ -1649,7 +1732,7 @@ void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
FlushInstructionCache(GetCurrentProcess(), (LPCVOID)instrPtr, 10);
// It's not GC safe point, the GC Stress instruction is
- // already commited and interrupt is already put at next instruction so we just return.
+ // already committed and interrupt is already put at next instruction so we just return.
return;
}
#else
@@ -1692,26 +1775,38 @@ void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
frame.Push(pThread);
#endif // USE_REDIRECT_FOR_GCSTRESS
-#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) || defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
- FrameWithCookie<GCFrame> gcFrame;
- DWORD_PTR retVal = 0;
- if (afterCallProtect) // Do I need to protect return value?
- {
-#ifdef _TARGET_AMD64_
- retVal = regs->Rax;
+ DWORD_PTR retValRegs[2] = { 0 };
+ UINT numberOfRegs = 0;
+
+ if (afterCallProtect[0])
+ {
+#if defined(_TARGET_AMD64_)
+ retValRegs[numberOfRegs++] = regs->Rax;
#elif defined(_TARGET_X86_)
- retVal = regs->Eax;
-#elif defined(_TARGET_ARM_)
- retVal = regs->R0;
+ retValRegs[numberOfRegs++] = regs->Eax;
+#elif defined(_TARGET_ARM_)
+ retValRegs[numberOfRegs++] = regs->R0;
#elif defined(_TARGET_ARM64_)
- retVal = regs->X0;
-#else
- PORTABILITY_ASSERT("DoGCStress - return register");
-#endif
- gcFrame.Init(pThread, (OBJECTREF*) &retVal, 1, TRUE);
+ retValRegs[numberOfRegs++] = regs->X0;
+#endif // _TARGET_ARM64_
+ }
+
+ if (afterCallProtect[1])
+ {
+#if defined(_TARGET_AMD64_) && defined(PLATFORM_UNIX)
+ retValRegs[numberOfRegs++] = regs->Rdx;
+#else // !_TARGET_AMD64_ || !PLATFORM_UNIX
+ _ASSERTE(!"Not expected multi reg return with pointers.");
+#endif // !_TARGET_AMD64_ || !PLATFORM_UNIX
+ }
+
+ FrameWithCookie<GCFrame> gcFrame;
+ if (numberOfRegs != 0)
+ {
+ _ASSERTE(sizeof(OBJECTREF) == sizeof(DWORD_PTR));
+ gcFrame.Init(pThread, (OBJECTREF*)retValRegs, numberOfRegs, TRUE);
}
-#endif // _TARGET_*
if (gcCover->lastMD != pMD)
{
@@ -1745,23 +1840,34 @@ void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
CONSISTENCY_CHECK(!pThread->HasPendingGCStressInstructionUpdate());
-#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) || defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
- if (afterCallProtect)
+ if (numberOfRegs != 0)
{
-#ifdef _TARGET_AMD64_
- regs->Rax = retVal;
+ if (afterCallProtect[0])
+ {
+#if defined(_TARGET_AMD64_)
+ regs->Rax = retValRegs[0];
#elif defined(_TARGET_X86_)
- regs->Eax = retVal;
+ regs->Eax = retValRegs[0];
#elif defined(_TARGET_ARM_)
- regs->R0 = retVal;
+ regs->R0 = retValRegs[0];
#elif defined(_TARGET_ARM64_)
- regs->X[0] = retVal;
+ regs->X[0] = retValRegs[0];
#else
- PORTABILITY_ASSERT("DoGCStress - return register");
-#endif
+ PORTABILITY_ASSERT("DoGCStress - return register");
+#endif
+ }
+
+ if (afterCallProtect[1])
+ {
+#if defined(_TARGET_AMD64_) && defined(PLATFORM_UNIX)
+ regs->Rdx = retValRegs[numberOfRegs - 1];
+#else // !_TARGET_AMD64_ || !PLATFORM_UNIX
+ _ASSERTE(!"Not expected multi reg return with pointers.");
+#endif // !_TARGET_AMD64_ || !PLATFORM_UNIX
+ }
+
gcFrame.Pop();
}
-#endif // _TARGET_*
#if !defined(USE_REDIRECT_FOR_GCSTRESS)
frame.Pop(pThread);
diff --git a/src/vm/gccover.h b/src/vm/gccover.h
index 7835e8c990..6588975cd0 100644
--- a/src/vm/gccover.h
+++ b/src/vm/gccover.h
@@ -68,9 +68,11 @@ public:
#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
-#define INTERRUPT_INSTR 0xF4 // X86 HLT instruction (any 1 byte illegal instruction will do)
-#define INTERRUPT_INSTR_CALL 0xFA // X86 CLI instruction
-#define INTERRUPT_INSTR_PROTECT_RET 0xFB // X86 STI instruction
+#define INTERRUPT_INSTR 0xF4 // X86 HLT instruction (any 1 byte illegal instruction will do)
+#define INTERRUPT_INSTR_CALL 0xFA // X86 CLI instruction
+#define INTERRUPT_INSTR_PROTECT_FIRST_RET 0xFB // X86 STI instruction, protect the first return register
+#define INTERRUPT_INSTR_PROTECT_SECOND_RET 0xEC // X86 IN instruction, protect the second return register
+#define INTERRUPT_INSTR_PROTECT_BOTH_RET 0xED // X86 IN instruction, protect both return registers
#elif defined(_TARGET_ARM_)
diff --git a/src/vm/method.cpp b/src/vm/method.cpp
index c17901582f..d0f5bfa2b4 100644
--- a/src/vm/method.cpp
+++ b/src/vm/method.cpp
@@ -1284,8 +1284,17 @@ ReturnKind MethodDesc::ParseReturnKindFromSig(INDEBUG(bool supportStringConstruc
if (pReturnTypeMT->ContainsPointers())
{
- _ASSERTE(pReturnTypeMT->GetNumInstanceFieldBytes() == sizeof(void*));
- return RT_Object;
+ if (pReturnTypeMT->GetNumInstanceFields() == 1)
+ {
+ _ASSERTE(pReturnTypeMT->GetNumInstanceFieldBytes() == sizeof(void*));
+ // Note: we can't distinguish RT_Object from RT_ByRef; the caller has to tolerate that.
+ return RT_Object;
+ }
+ else
+ {
+ // Multi-reg return case with pointers; can't restore the actual kind.
+ return RT_Illegal;
+ }
}
}
}
diff --git a/src/vm/method.hpp b/src/vm/method.hpp
index ca2e88fdcc..3122e4e91a 100644
--- a/src/vm/method.hpp
+++ b/src/vm/method.hpp
@@ -1666,6 +1666,12 @@ private:
ReturnKind ParseReturnKindFromSig(INDEBUG(bool supportStringConstructors = false));
public:
+ // This method restores the ReturnKind using the class handle. It is fully supported only on x64 Ubuntu;
+ // other platforms do not support the multi-reg return case with pointers.
+ // Use this method only when you can't hit that case
+ // (like ComPlusMethodFrame::GcScanRoots) or when you can tolerate an RT_Illegal return.
+ // Also, on the other platforms, in the single-field struct return case
+ // the function can't distinguish RT_Object from RT_ByRef.
ReturnKind GetReturnKind(INDEBUG(bool supportStringConstructors = false));
public: