author     Sergey Andreenko <seandree@microsoft.com>   2019-06-05 17:25:24 -0700
committer  GitHub <noreply@github.com>                 2019-06-05 17:25:24 -0700
commit     3dd303f0004ed4771bc29167df30efda07e4cf7e (patch)
tree       164a0fdc4a5f66dd3cecac12b1ebf20743ff0535
parent     5d4ff2f11a87d7d434d05e72744946f48e017b11 (diff)
Fix GCStress coverage for multi reg returns. (#24826)
* Extract ReplaceInstrAfterCall.
* Avoid GCStress when returning a multi-reg value with pointers. Determine when we need to protect the second register and do not cause GCStress in such cases.
* Add a repro test.
* Reenable MethodImplOptionsTests.
* Extract IsGcCoverageInterruptInstruction. This changes how we do checks for arm32 in `IsGcCoverageInterrupt`.
* Tolerate a direct call to JIT_RareDisableHelper. On x86, ILStubClass:IL_STUB_PInvoke(byref,ref,int,byref):int generates it like:
      Generating: N119 ( 4, 7) [000118] ------------ * RETURNTRAP int REG NA
      IN0021: cmp dword ptr [0F9BF9F8H], 0
      New Basic Block BB10 [0009] created.
      IN0022: je L_M6496_BB10
      Call: GCvars=00000001 {V01}, gcrefRegs=00000000 {}, byrefRegs=00000000 {}
      IN0023: call CORINFO_HELP_STOP_FOR_GC
* Support GC stress protect return 1/2/both on Unix x64.
* Fix arm64. Do not insert GC stress instructions when we can't determine the exact return kind.
* Fix review1.
* Fix review2.
* Change the test as Andy suggested.
* Fix some typos.
* Replace all SLOT with PBYTE.
* Disable an assert that can fail because of multithreading.
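Background for the change (not part of the commit message): on Unix x64 the ABI can return a small struct in two registers, so the GC stress trap written after a call must record whether the first return register, the second, or both hold GC pointers. A minimal sketch of that mapping, using the opcode values this change defines in gccover.h; the enum and helper name below are illustrative only, not CoreCLR code:

    // Illustrative sketch only. Chooses the one-byte x86/x64 trap opcode that is
    // written over the instruction following a call; the GC stress fault handler
    // uses it to decide which return registers to report as GC roots.
    enum class ProtectKind { None, First, Second, Both };

    static unsigned char GcStressTrapOpcode(ProtectKind kind)
    {
        switch (kind)
        {
        case ProtectKind::First:  return 0xFB; // INTERRUPT_INSTR_PROTECT_FIRST_RET  (STI)
        case ProtectKind::Second: return 0xEC; // INTERRUPT_INSTR_PROTECT_SECOND_RET (IN), Unix x64 only
        case ProtectKind::Both:   return 0xED; // INTERRUPT_INSTR_PROTECT_BOTH_RET   (IN), Unix x64 only
        default:                  return 0xF4; // INTERRUPT_INSTR (HLT)
        }
    }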
-rw-r--r--  src/inc/gcinfotypes.h                                                          |   6
-rw-r--r--  src/vm/gccover.cpp                                                             | 588
-rw-r--r--  src/vm/gccover.h                                                               |   8
-rw-r--r--  src/vm/method.cpp                                                              |  13
-rw-r--r--  src/vm/method.hpp                                                              |   6
-rw-r--r--  tests/src/JIT/Regression/JitBlue/GitHub_23199/GitHub_23199.cs                  | 201
-rw-r--r--  tests/src/JIT/Regression/JitBlue/GitHub_23199/GitHub_23199.csproj              |  49
-rw-r--r--  tests/src/ilasm/System/Runtime/CompilerServices/MethodImplOptionsTests.csproj  |   2
8 files changed, 625 insertions, 248 deletions
diff --git a/src/inc/gcinfotypes.h b/src/inc/gcinfotypes.h
index 652ab9fbff..d45b9a6292 100644
--- a/src/inc/gcinfotypes.h
+++ b/src/inc/gcinfotypes.h
@@ -251,6 +251,12 @@ inline bool IsValidFieldReturnKind(ReturnKind returnKind)
return (returnKind == RT_Scalar || returnKind == RT_Object || returnKind == RT_ByRef);
}
+inline bool IsPointerFieldReturnKind(ReturnKind returnKind)
+{
+ _ASSERTE(IsValidFieldReturnKind(returnKind));
+ return (returnKind == RT_Object || returnKind == RT_ByRef);
+}
+
inline bool IsValidReturnRegister(size_t regNo)
{
return (regNo == 0)
diff --git a/src/vm/gccover.cpp b/src/vm/gccover.cpp
index be4aa36bd6..c5cbdb0c3c 100644
--- a/src/vm/gccover.cpp
+++ b/src/vm/gccover.cpp
@@ -36,8 +36,8 @@
/****************************************************************************/
MethodDesc* AsMethodDesc(size_t addr);
-static SLOT getTargetOfCall(SLOT instrPtr, PCONTEXT regs, SLOT*nextInstr);
-bool isCallToStopForGCJitHelper(SLOT instrPtr);
+static PBYTE getTargetOfCall(PBYTE instrPtr, PCONTEXT regs, PBYTE*nextInstr);
+bool isCallToStopForGCJitHelper(PBYTE instrPtr);
#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
static void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID codeStart);
static bool replaceInterruptibleRangesWithGcStressInstr (UINT32 startOffset, UINT32 stopOffset, LPVOID codeStart);
@@ -73,6 +73,94 @@ static MethodDesc* getTargetMethodDesc(PCODE target)
}
+bool IsGcCoverageInterruptInstruction(PBYTE instrPtr)
+{
+#if defined(_TARGET_ARM64_)
+ UINT32 instrVal = *reinterpret_cast<UINT32*>(instrPtr);
+ switch (instrVal)
+ {
+ case INTERRUPT_INSTR:
+ case INTERRUPT_INSTR_CALL:
+ case INTERRUPT_INSTR_PROTECT_RET:
+ return true;
+ default:
+ return false;
+ }
+#elif defined(_TARGET_ARM_)
+
+ size_t instrLen = GetARMInstructionLength(instrPtr);
+ if (instrLen == 2)
+ {
+ UINT16 instrVal = *reinterpret_cast<UINT16*>(instrPtr);
+ switch (instrVal)
+ {
+ case INTERRUPT_INSTR:
+ case INTERRUPT_INSTR_CALL:
+ case INTERRUPT_INSTR_PROTECT_RET:
+ return true;
+ default:
+ return false;
+ }
+ }
+ else
+ {
+ _ASSERTE(instrLen == 4);
+ UINT32 instrVal = *reinterpret_cast<UINT32*>(instrPtr);
+ switch (instrVal)
+ {
+ case INTERRUPT_INSTR_32:
+ case INTERRUPT_INSTR_CALL_32:
+ case INTERRUPT_INSTR_PROTECT_RET_32:
+ return true;
+ default:
+ return false;
+ }
+ }
+#else // x64 and x86
+ UINT8 instrVal = *reinterpret_cast<UINT8*>(instrPtr);
+ switch (instrVal)
+ {
+ case INTERRUPT_INSTR:
+ case INTERRUPT_INSTR_CALL:
+ case INTERRUPT_INSTR_PROTECT_FIRST_RET:
+ case INTERRUPT_INSTR_PROTECT_SECOND_RET:
+ case INTERRUPT_INSTR_PROTECT_BOTH_RET:
+ return true;
+ default:
+ return false;
+ }
+#endif
+}
+
+bool IsOriginalInstruction(PBYTE instrPtr, GCCoverageInfo* gcCover, DWORD offset)
+{
+#if defined(_TARGET_ARM64_)
+ UINT32 instrVal = *reinterpret_cast<UINT32*>(instrPtr);
+ UINT32 origInstrVal = *reinterpret_cast<UINT32*>(gcCover->savedCode + offset);
+ return (instrVal == origInstrVal);
+#elif defined(_TARGET_ARM_)
+ size_t instrLen = GetARMInstructionLength(instrPtr);
+ if (instrLen == 2)
+ {
+ UINT16 instrVal = *reinterpret_cast<UINT16*>(instrPtr);
+ UINT16 origInstrVal = *reinterpret_cast<UINT16*>(gcCover->savedCode + offset);
+ return (instrVal == origInstrVal);
+ }
+ else
+ {
+ _ASSERTE(instrLen == 4);
+ UINT32 instrVal = *reinterpret_cast<UINT32*>(instrPtr);
+ UINT32 origInstrVal = *reinterpret_cast<UINT32*>(gcCover->savedCode + offset);
+ return (instrVal == origInstrVal);
+ }
+#else // x64 and x86
+ UINT8 instrVal = *reinterpret_cast<UINT8*>(instrPtr);
+ UINT8 origInstrVal = gcCover->savedCode[offset];
+ return (instrVal == origInstrVal);
+#endif
+}
+
+
void SetupAndSprinkleBreakpoints(
MethodDesc * pMD,
EECodeInfo * pCodeInfo,
@@ -250,6 +338,94 @@ void SetupGcCoverageForNativeImage(Module* module)
}
#endif
+
+void ReplaceInstrAfterCall(PBYTE instrToReplace, MethodDesc* callMD)
+{
+ ReturnKind returnKind = callMD->GetReturnKind(true);
+ if (!IsValidReturnKind(returnKind))
+ {
+#if defined(_TARGET_AMD64_) && defined(PLATFORM_UNIX)
+ _ASSERTE(!"Unexpected return kind for x64 Unix.");
+#else
+ // Skip GC coverage after the call.
+ return;
+#endif
+ }
+ _ASSERTE(IsValidReturnKind(returnKind));
+
+ bool ispointerKind = IsPointerReturnKind(returnKind);
+#ifdef _TARGET_ARM_
+ size_t instrLen = GetARMInstructionLength(instrToReplace);
+ bool protectReturn = ispointerKind;
+ if (protectReturn)
+ if (instrLen == 2)
+ *(WORD*)instrToReplace = INTERRUPT_INSTR_PROTECT_RET;
+ else
+ *(DWORD*)instrToReplace = INTERRUPT_INSTR_PROTECT_RET_32;
+ else
+ if (instrLen == 2)
+ *(WORD*)instrToReplace = INTERRUPT_INSTR;
+ else
+ *(DWORD*)instrToReplace = INTERRUPT_INSTR_32;
+#elif defined(_TARGET_ARM64_)
+ bool protectReturn = ispointerKind;
+ if (protectReturn)
+ *(DWORD*)instrToReplace = INTERRUPT_INSTR_PROTECT_RET;
+ else
+ *(DWORD*)instrToReplace = INTERRUPT_INSTR;
+#elif defined(_TARGET_AMD64_) || defined(_TARGET_X86_)
+
+
+ if (ispointerKind)
+ {
+ bool protectRegister[2] = { false, false };
+
+ bool moreRegisters = false;
+
+ ReturnKind fieldKind1 = ExtractRegReturnKind(returnKind, 0, moreRegisters);
+ if (IsPointerFieldReturnKind(fieldKind1))
+ {
+ protectRegister[0] = true;
+ }
+ if (moreRegisters)
+ {
+ ReturnKind fieldKind2 = ExtractRegReturnKind(returnKind, 1, moreRegisters);
+ if (IsPointerFieldReturnKind(fieldKind2))
+ {
+ protectRegister[1] = true;
+ }
+ }
+ _ASSERTE(!moreRegisters);
+
+ if (protectRegister[0] && !protectRegister[1])
+ {
+ *instrToReplace = INTERRUPT_INSTR_PROTECT_FIRST_RET;
+ }
+ else
+ {
+#if !defined(_TARGET_AMD64_) || !defined(PLATFORM_UNIX)
+ _ASSERTE(!"Not expected multi reg return with pointers.");
+#endif // !_TARGET_AMD64_ || !PLATFORM_UNIX
+ if (!protectRegister[0] && protectRegister[1])
+ {
+ *instrToReplace = INTERRUPT_INSTR_PROTECT_SECOND_RET;
+ }
+ else
+ {
+ _ASSERTE(protectRegister[0] && protectRegister[1]);
+ *instrToReplace = INTERRUPT_INSTR_PROTECT_BOTH_RET;
+ }
+ }
+ }
+ else
+ {
+ *instrToReplace = INTERRUPT_INSTR;
+ }
+#else
+ _ASSERTE(!"not implemented for platform");
+#endif
+}
+
#ifdef _TARGET_AMD64_
class GCCoverageRangeEnumerator
@@ -335,9 +511,9 @@ public:
#endif // _TARGET_AMD64_
-// When Sprinking break points, we must make sure that certain calls to
-// Thread-suspension routines inlined into the managed method are not
-// converted to GC-Stress points. Otherwise, this will lead to race
+// When sprinkling break points, we must make sure that certain calls to
+// Thread-suspension routines inlined into the managed method are not
+// converted to GC-Stress points. Otherwise, this will lead to race
// conditions with the GC.
//
// For example, for an inlined PInvoke stub, the JIT generates the following code
@@ -391,9 +567,9 @@ public:
extern "C" FCDECL0(VOID, JIT_RareDisableHelper);
/****************************************************************************/
-/* sprinkle interupt instructions that will stop on every GCSafe location
+/* sprinkle interrupt instructions that will stop on every GCSafe location
regionOffsetAdj - Represents the offset of the current region
- from the beginning of the method (is 0 for hot region)
+ from the beginning of the method (is 0 for hot region)
*/
void GCCoverageInfo::SprinkleBreakpoints(
@@ -416,7 +592,7 @@ void GCCoverageInfo::SprinkleBreakpoints(
ClrVirtualProtect(codeStart, codeSize, PAGE_EXECUTE_READWRITE, &oldProtect);
}
- SLOT cur;
+ PBYTE cur;
BYTE* codeEnd = codeStart + codeSize;
EECodeInfo codeInfo((PCODE)codeStart);
@@ -471,7 +647,7 @@ void GCCoverageInfo::SprinkleBreakpoints(
// for code. It uses some for switch tables. Because the first few offsets
// may be decodable as instructions, we can't reason about where we should
// encounter invalid instructions. However, we do not want to silently skip
- // large chunks of methods just becuase the JIT started emitting a new
+ // large chunks of methods just because the JIT started emitting a new
// instruction, so only assume it is a switch table if we've seen the switch
// code (an indirect unconditional jump)
if ((len == 0) && fSawPossibleSwitch)
@@ -487,6 +663,8 @@ void GCCoverageInfo::SprinkleBreakpoints(
_ASSERTE(len > 0);
_ASSERTE(len <= (size_t)(codeEnd-cur));
+ bool skipGCForCurrentInstr = false;
+
switch(instructionType)
{
case InstructionType::Call_IndirectUnconditional:
@@ -505,14 +683,16 @@ void GCCoverageInfo::SprinkleBreakpoints(
if(safePointDecoder.IsSafePoint((UINT32)(cur + len - codeStart + regionOffsetAdj)))
#endif
{
- SLOT nextInstr;
- SLOT target = getTargetOfCall(cur, NULL, &nextInstr);
+ PBYTE nextInstr;
+ PBYTE target = getTargetOfCall(cur, NULL, &nextInstr);
if (target != 0)
{
- // JIT_RareDisableHelper() is expected to be an indirect call.
- // If we encounter a direct call (in future), skip the call
- _ASSERTE(target != (SLOT)JIT_RareDisableHelper);
+ if (target == (PBYTE)JIT_RareDisableHelper)
+ {
+ // Skip the call to JIT_RareDisableHelper.
+ skipGCForCurrentInstr = true;
+ }
targetMD = getTargetMethodDesc((PCODE)target);
}
}
@@ -530,30 +710,31 @@ void GCCoverageInfo::SprinkleBreakpoints(
break;
}
- if (prevDirectCallTargetMD != 0)
+ if (!skipGCForCurrentInstr)
{
- ReturnKind returnKind = prevDirectCallTargetMD->GetReturnKind(true);
- if (IsPointerReturnKind(returnKind))
- *cur = INTERRUPT_INSTR_PROTECT_RET;
- else
- *cur = INTERRUPT_INSTR;
- }
+ if (prevDirectCallTargetMD != 0)
+ {
+ ReplaceInstrAfterCall(cur, prevDirectCallTargetMD);
+ }
- // For fully interruptible code, we end up whacking every instruction
- // to INTERRUPT_INSTR. For non-fully interruptible code, we end
- // up only touching the call instructions (specially so that we
- // can really do the GC on the instruction just after the call).
- _ASSERTE(FitsIn<DWORD>((cur - codeStart) + regionOffsetAdj));
- if (codeMan->IsGcSafe(&codeInfo, static_cast<DWORD>((cur - codeStart) + regionOffsetAdj)))
- *cur = INTERRUPT_INSTR;
+ // For fully interruptible code, we end up whacking every instruction
+ // to INTERRUPT_INSTR. For non-fully interruptible code, we end
+ // up only touching the call instructions (specially so that we
+ // can really do the GC on the instruction just after the call).
+ size_t dwRelOffset = (cur - codeStart) + regionOffsetAdj;
+ _ASSERTE(FitsIn<DWORD>(dwRelOffset));
+ if (codeMan->IsGcSafe(&codeInfo, static_cast<DWORD>(dwRelOffset)))
+ *cur = INTERRUPT_INSTR;
#ifdef _TARGET_X86_
- // we will whack every instruction in the prolog and epilog to make certain
- // our unwinding logic works there.
- if (codeMan->IsInPrologOrEpilog((cur - codeStart) + (DWORD)regionOffsetAdj, gcInfoToken, NULL)) {
- *cur = INTERRUPT_INSTR;
- }
+ // we will whack every instruction in the prolog and epilog to make certain
+ // our unwinding logic works there.
+ if (codeMan->IsInPrologOrEpilog((cur - codeStart) + (DWORD)regionOffsetAdj, gcInfoToken, NULL))
+ {
+ *cur = INTERRUPT_INSTR;
+ }
#endif
+ }
// If we couldn't find the method desc targetMD is zero
prevDirectCallTargetMD = targetMD;
@@ -561,7 +742,7 @@ void GCCoverageInfo::SprinkleBreakpoints(
cur += len;
#ifdef _TARGET_AMD64_
- SLOT newCur = rangeEnum.EnsureInRange(cur);
+ PBYTE newCur = rangeEnum.EnsureInRange(cur);
if(newCur != cur)
{
prevDirectCallTargetMD = NULL;
@@ -573,7 +754,7 @@ void GCCoverageInfo::SprinkleBreakpoints(
// If we are not able to place an interrupt at the first instruction, this means that
// we are partially interruptible with no prolog. Just don't bother to do the
- // the epilog checks, since the epilog will be trival (a single return instr)
+ // the epilog checks, since the epilog will be trivial (a single return instr)
assert(codeSize > 0);
if ((regionOffsetAdj==0) && (*codeStart != INTERRUPT_INSTR))
doingEpilogChecks = false;
@@ -647,7 +828,7 @@ void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID
return;
}
- SLOT instrPtr = (BYTE*)PCODEToPINSTR(pCode);
+ PBYTE instrPtr = (BYTE*)PCODEToPINSTR(pCode);
// For code sequences of the type
// BL func1
@@ -657,7 +838,7 @@ void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID
// However as the first safe point is already replaced with gcstress instruction, decoding of the call
// instruction will fail when processing for the 2nd safe point. Therefore saved instruction must be used instead of
// instrPtr for decoding the call instruction.
- SLOT savedInstrPtr = ((GCCoverageInfo*)pGCCover)->savedCode + safePointOffset;
+ PBYTE savedInstrPtr = ((GCCoverageInfo*)pGCCover)->savedCode + safePointOffset;
//Determine if instruction before the safe point is call using immediate (BLX Imm) or call by register (BLX Rm)
BOOL instructionIsACallThroughRegister = FALSE;
@@ -707,7 +888,7 @@ void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID
if(instructionIsACallThroughRegister)
{
// If it is call by register then cannot know MethodDesc so replace the call instruction with illegal instruction
- // safe point will be replaced with appropiate illegal instruction at execution time when reg value is known
+ // safe point will be replaced with appropriate illegal instruction at execution time when reg value is known
#if defined(_TARGET_ARM_)
*((WORD*)instrPtr - 1) = INTERRUPT_INSTR_CALL;
#elif defined(_TARGET_ARM64_)
@@ -717,8 +898,8 @@ void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID
else if(instructionIsACallThroughImmediate)
{
// If it is call by immediate then find the methodDesc
- SLOT nextInstr;
- SLOT target = getTargetOfCall((SLOT)((WORD*)savedInstrPtr-2), NULL, &nextInstr);
+ PBYTE nextInstr;
+ PBYTE target = getTargetOfCall((PBYTE)((WORD*)savedInstrPtr-2), NULL, &nextInstr);
if (target != 0)
{
@@ -733,43 +914,24 @@ void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID
{
// The instruction about to be replaced cannot already be a gcstress instruction
-#if defined(_TARGET_ARM_)
- size_t instrLen = GetARMInstructionLength(instrPtr);
- if (instrLen == 2)
- {
- _ASSERTE(*((WORD*)instrPtr) != INTERRUPT_INSTR &&
- *((WORD*)instrPtr) != INTERRUPT_INSTR_CALL &&
- *((WORD*)instrPtr) != INTERRUPT_INSTR_PROTECT_RET);
- }
- else
- {
- _ASSERTE(*((DWORD*)instrPtr) != INTERRUPT_INSTR_32 &&
- *((DWORD*)instrPtr) != INTERRUPT_INSTR_CALL_32 &&
- *((DWORD*)instrPtr) != INTERRUPT_INSTR_PROTECT_RET_32);
- }
-#elif defined(_TARGET_ARM64_)
- {
- _ASSERTE(*((DWORD*)instrPtr) != INTERRUPT_INSTR &&
- *((DWORD*)instrPtr) != INTERRUPT_INSTR_CALL &&
- *((DWORD*)instrPtr) != INTERRUPT_INSTR_PROTECT_RET);
- }
-#endif
+ _ASSERTE(!IsGcCoverageInterruptInstruction(instrPtr));
+
//
// When applying GC coverage breakpoints at native image load time, the code here runs
// before eager fixups are applied for the module being loaded. The direct call target
// never requires restore, however it is possible that it is initially in an invalid state
// and remains invalid until one or more eager fixups are applied.
//
- // GetReturnKind consults the method signature, meaning it consults the
+ // ReplaceInstrAfterCall consults the method signature, meaning it consults the
// metadata in the owning module. For generic instantiations stored in non-preferred
// modules, reaching the owning module requires following the module override pointer for
// the enclosing MethodTable. In this case, the module override pointer is generally
// invalid until an associated eager fixup is applied.
//
- // In situations like this, GetReturnKind will try to dereference an
+ // In situations like this, ReplaceInstrAfterCall will try to dereference an
// unresolved fixup and will AV.
//
- // Given all of this, skip the GetReturnKind call by default to avoid
+ // Given all of this, skip the ReplaceInstrAfterCall call by default to avoid
// unexpected AVs. This implies leaving out the GC coverage breakpoints for direct calls
// unless COMPlus_GcStressOnDirectCalls=1 is explicitly set in the environment.
//
@@ -778,33 +940,7 @@ void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID
if (fGcStressOnDirectCalls.val(CLRConfig::INTERNAL_GcStressOnDirectCalls))
{
- ReturnKind returnKind = targetMD->GetReturnKind(true);
-
- // If the method returns an object then should protect the return object
- if (IsPointerReturnKind(returnKind))
- {
- // replace with corresponding 2 or 4 byte illegal instruction (which roots the return value)
-#if defined(_TARGET_ARM_)
- if (instrLen == 2)
- *((WORD*)instrPtr) = INTERRUPT_INSTR_PROTECT_RET;
- else
- *((DWORD*)instrPtr) = INTERRUPT_INSTR_PROTECT_RET_32;
-#elif defined(_TARGET_ARM64_)
- *((DWORD*)instrPtr) = INTERRUPT_INSTR_PROTECT_RET;
-#endif
- }
- else // method does not return an objectref
- {
- // replace with corresponding 2 or 4 byte illegal instruction
-#if defined(_TARGET_ARM_)
- if (instrLen == 2)
- *((WORD*)instrPtr) = INTERRUPT_INSTR;
- else
- *((DWORD*)instrPtr) = INTERRUPT_INSTR_32;
-#elif defined(_TARGET_ARM64_)
- *((DWORD*)instrPtr) = INTERRUPT_INSTR;
-#endif
- }
+ ReplaceInstrAfterCall(instrPtr, targetMD);
}
}
}
@@ -816,10 +952,10 @@ void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID
bool replaceInterruptibleRangesWithGcStressInstr (UINT32 startOffset, UINT32 stopOffset, LPVOID pGCCover)
{
PCODE pCode = NULL;
- SLOT rangeStart = NULL;
- SLOT rangeStop = NULL;
+ PBYTE rangeStart = NULL;
+ PBYTE rangeStop = NULL;
- //Interruptible range can span accross hot & cold region
+ //Interruptible range can span across hot & cold region
int acrossHotRegion = 1; // 1 means range is not across end of hot region & 2 is when it is across end of hot region
//Find the code addresses from offsets
@@ -858,25 +994,14 @@ bool replaceInterruptibleRangesWithGcStressInstr (UINT32 startOffset, UINT32 sto
// Need to do two iterations if interruptible range spans across hot & cold region
while(acrossHotRegion--)
{
- SLOT instrPtr = rangeStart;
+ PBYTE instrPtr = rangeStart;
while(instrPtr < rangeStop)
{
// The instruction about to be replaced cannot already be a gcstress instruction
+ _ASSERTE(!IsGcCoverageInterruptInstruction(instrPtr));
#if defined(_TARGET_ARM_)
size_t instrLen = GetARMInstructionLength(instrPtr);
- if (instrLen == 2)
- {
- _ASSERTE(*((WORD*)instrPtr) != INTERRUPT_INSTR &&
- *((WORD*)instrPtr) != INTERRUPT_INSTR_CALL &&
- *((WORD*)instrPtr) != INTERRUPT_INSTR_PROTECT_RET);
- }
- else
- {
- _ASSERTE(*((DWORD*)instrPtr) != INTERRUPT_INSTR_32 &&
- *((DWORD*)instrPtr) != INTERRUPT_INSTR_CALL_32 &&
- *((DWORD*)instrPtr) != INTERRUPT_INSTR_PROTECT_RET_32);
- }
if (instrLen == 2)
*((WORD*)instrPtr) = INTERRUPT_INSTR;
@@ -889,12 +1014,6 @@ bool replaceInterruptibleRangesWithGcStressInstr (UINT32 startOffset, UINT32 sto
instrPtr += instrLen;
#elif defined(_TARGET_ARM64_)
- {
- _ASSERTE(*((DWORD*)instrPtr) != INTERRUPT_INSTR &&
- *((DWORD*)instrPtr) != INTERRUPT_INSTR_CALL &&
- *((DWORD*)instrPtr) != INTERRUPT_INSTR_PROTECT_RET);
- }
-
// Do not replace with gcstress interrupt instruction at call to JIT_RareDisableHelper
if(!isCallToStopForGCJitHelper(instrPtr))
*((DWORD*)instrPtr) = INTERRUPT_INSTR;
@@ -926,7 +1045,7 @@ bool replaceInterruptibleRangesWithGcStressInstr (UINT32 startOffset, UINT32 sto
// For other architecture we detect call to JIT_RareDisableHelper
// in function OnGcCoverageInterrupt() since getTargetOfCall() can
// get the actual jithelper target.
-bool isCallToStopForGCJitHelper(SLOT instrPtr)
+bool isCallToStopForGCJitHelper(PBYTE instrPtr)
{
#if defined(_TARGET_ARM64_)
if (((*reinterpret_cast<DWORD*>(instrPtr)) & 0xFC000000) == 0x94000000) // Do we have a BL instruction?
@@ -973,7 +1092,7 @@ static size_t getRegVal(unsigned regNum, PCONTEXT regs)
}
/****************************************************************************/
-static SLOT getTargetOfCall(SLOT instrPtr, PCONTEXT regs, SLOT*nextInstr) {
+static PBYTE getTargetOfCall(PBYTE instrPtr, PCONTEXT regs, PBYTE* nextInstr) {
BYTE sibindexadj = 0;
BYTE baseadj = 0;
@@ -984,12 +1103,12 @@ static SLOT getTargetOfCall(SLOT instrPtr, PCONTEXT regs, SLOT*nextInstr) {
// When decoding the instructions of a method which is sprinkled with
// TRAP instructions for GCStress, we decode the bytes from a copy
// of the instructions stored before the traps-for-gc were inserted.
- // Hoiwever, the PC-relative addressing/displacement of the CALL-target
+ // However, the PC-relative addressing/displacement of the CALL-target
// will still be with respect to the currently executing PC.
// So, if a register context is available, we pick the PC from it
// (for address calculation purposes only).
- SLOT PC = (regs) ? (SLOT)GetIP(regs) : instrPtr;
+ PBYTE PC = (regs) ? (PBYTE)GetIP(regs) : instrPtr;
#ifdef _TARGET_ARM_
if((instrPtr[1] & 0xf0) == 0xf0) // direct call
@@ -1061,7 +1180,7 @@ static SLOT getTargetOfCall(SLOT instrPtr, PCONTEXT regs, SLOT*nextInstr) {
// Note that the signed displacement is sign-extended
// to 64-bit on AMD64
- return((SLOT)(base + (SSIZE_T)displacement));
+ return((PBYTE)(base + (SSIZE_T)displacement));
}
if (instrPtr[0] == 0xFF) { // Indirect Absolute Near
@@ -1070,7 +1189,7 @@ static SLOT getTargetOfCall(SLOT instrPtr, PCONTEXT regs, SLOT*nextInstr) {
BYTE mod = (instrPtr[1] & 0xC0) >> 6;
BYTE rm = (instrPtr[1] & 0x7);
- SLOT result;
+ PBYTE result;
switch (mod) {
case 0:
@@ -1145,7 +1264,7 @@ static SLOT getTargetOfCall(SLOT instrPtr, PCONTEXT regs, SLOT*nextInstr) {
result = 0;
#endif // _TARGET_AMD64_
} else {
- result = (SLOT)getRegVal(baseadj + rm, regs);
+ result = (PBYTE)getRegVal(baseadj + rm, regs);
}
if (mod == 0) {
@@ -1174,14 +1293,14 @@ static SLOT getTargetOfCall(SLOT instrPtr, PCONTEXT regs, SLOT*nextInstr) {
//
// Now dereference thru the result to get the resulting IP.
//
- result = (SLOT)(*((SLOT *)result));
+ result = (PBYTE)(*((PBYTE *)result));
break;
case 3:
default:
- result = (SLOT)getRegVal(baseadj + rm, regs);
+ result = (PBYTE)getRegVal(baseadj + rm, regs);
displace += 2;
break;
@@ -1237,25 +1356,19 @@ bool IsGcCoverageInterrupt(LPVOID ip)
return false;
}
- // Now it's safe to dereference the IP to check the instruction
-#if defined(_TARGET_ARM64_)
- UINT32 instructionCode = *reinterpret_cast<UINT32 *>(ip);
-#elif defined(_TARGET_ARM_)
- UINT16 instructionCode = *reinterpret_cast<UINT16 *>(ip);
-#else
- UINT8 instructionCode = *reinterpret_cast<UINT8 *>(ip);
-#endif
- switch (instructionCode)
+ PBYTE instrPtr = reinterpret_cast<PBYTE>(ip);
+
+ if (IsGcCoverageInterruptInstruction(instrPtr))
{
- case INTERRUPT_INSTR:
- case INTERRUPT_INSTR_CALL:
- case INTERRUPT_INSTR_PROTECT_RET:
- return true;
+ return true;
+ }
- default:
- // Another thread may have already changed the code back to the original
- return instructionCode == gcCover->savedCode[codeInfo.GetRelOffset()];
+ if (IsOriginalInstruction(instrPtr, gcCover, codeInfo.GetRelOffset()))
+ {
+ // Another thread may have already changed the code back to the original.
+ return true;
}
+ return false;
}
// Remove the GcCoverage interrupt instruction, and restore the
@@ -1309,10 +1422,10 @@ BOOL OnGcCoverageInterrupt(PCONTEXT regs)
// If this trap instruction is taken in place of CORINFO_HELP_STOP_FOR_GC()
// Do not start a GC, but continue with the original instruction.
// See the comments above SprinkleBreakpoints() function.
- SLOT nextInstr;
- SLOT target = getTargetOfCall(savedInstrPtr, regs, &nextInstr);
+ PBYTE nextInstr;
+ PBYTE target = getTargetOfCall(savedInstrPtr, regs, &nextInstr);
- if (target == (SLOT)JIT_RareDisableHelper) {
+ if (target == (PBYTE)JIT_RareDisableHelper) {
RemoveGcCoverageInterrupt(instrPtr, savedInstrPtr);
return TRUE;
}
@@ -1354,7 +1467,7 @@ FORCEINLINE void UpdateGCStressInstructionWithoutGC ()
void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
{
PCODE controlPc = GetIP(regs);
- TADDR instrPtr = PCODEToPINSTR(controlPc);
+ PBYTE instrPtr = reinterpret_cast<PBYTE>(PCODEToPINSTR(controlPc));
if (!pMD)
{
@@ -1371,42 +1484,51 @@ void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
Thread *pThread = GetThread();
-#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
- BYTE instrVal = *(BYTE *)instrPtr;
- forceStack[6] = &instrVal; // This is so I can see it fastchecked
-
- if (instrVal != INTERRUPT_INSTR &&
- instrVal != INTERRUPT_INSTR_CALL &&
- instrVal != INTERRUPT_INSTR_PROTECT_RET) {
- _ASSERTE(instrVal == gcCover->savedCode[offset]); // someone beat us to it.
- return; // Someone beat us to it, just go on running
+ if (!IsGcCoverageInterruptInstruction(instrPtr))
+ {
+ // This assert can fail if another thread changed the original instruction to a
+ // GC coverage interrupt instruction between these two reads. Uncomment it
+ // when the threading issue gets resolved.
+ // _ASSERTE(IsOriginalInstruction(instrPtr, gcCover, offset));
+
+ // Someone beat us to it, just go on running.
+ return;
}
- bool atCall = (instrVal == INTERRUPT_INSTR_CALL);
- bool afterCallProtect = (instrVal == INTERRUPT_INSTR_PROTECT_RET);
+ bool atCall;
+ bool afterCallProtect[2] = { false, false };
-#elif defined(_TARGET_ARM_)
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
- WORD instrVal = *(WORD*)instrPtr;
+ BYTE instrVal = *instrPtr;
forceStack[6] = &instrVal; // This is so I can see it fastchecked
- size_t instrLen = GetARMInstructionLength(instrVal);
+ atCall = (instrVal == INTERRUPT_INSTR_CALL);
- bool atCall;
- bool afterCallProtect;
+ if (instrVal == INTERRUPT_INSTR_PROTECT_BOTH_RET)
+ {
+ afterCallProtect[0] = afterCallProtect[1] = true;
+ }
+ else if (instrVal == INTERRUPT_INSTR_PROTECT_FIRST_RET)
+ {
+ afterCallProtect[0] = true;
+ }
+ else if (instrVal == INTERRUPT_INSTR_PROTECT_SECOND_RET)
+ {
+ afterCallProtect[1] = true;
+ }
+
+#elif defined(_TARGET_ARM_)
+ forceStack[6] = (WORD*)instrPtr; // This is so I can see it fastchecked
+
+ size_t instrLen = GetARMInstructionLength(instrPtr);
if (instrLen == 2)
{
- if (instrVal != INTERRUPT_INSTR &&
- instrVal != INTERRUPT_INSTR_CALL &&
- instrVal != INTERRUPT_INSTR_PROTECT_RET) {
- _ASSERTE(instrVal == *(WORD*)(gcCover->savedCode + offset)); // someone beat us to it.
- return; // Someone beat us to it, just go on running
- }
-
+ WORD instrVal = *(WORD*)instrPtr;
atCall = (instrVal == INTERRUPT_INSTR_CALL);
- afterCallProtect = (instrVal == INTERRUPT_INSTR_PROTECT_RET);
+ afterCallProtect[0] = (instrVal == INTERRUPT_INSTR_PROTECT_RET);
}
else
{
@@ -1414,29 +1536,15 @@ void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
DWORD instrVal32 = *(DWORD*)instrPtr;
- if (instrVal32 != INTERRUPT_INSTR_32 &&
- instrVal32 != INTERRUPT_INSTR_CALL_32 &&
- instrVal32 != INTERRUPT_INSTR_PROTECT_RET_32) {
- _ASSERTE(instrVal32 == *(DWORD*)(gcCover->savedCode + offset)); // someone beat us to it.
- return; // Someone beat us to it, just go on running
- }
-
atCall = (instrVal32 == INTERRUPT_INSTR_CALL_32);
- afterCallProtect = (instrVal32 == INTERRUPT_INSTR_PROTECT_RET_32);
+ afterCallProtect[0] = (instrVal32 == INTERRUPT_INSTR_PROTECT_RET_32);
}
#elif defined(_TARGET_ARM64_)
DWORD instrVal = *(DWORD *)instrPtr;
forceStack[6] = &instrVal; // This is so I can see it fastchecked
- if (instrVal != INTERRUPT_INSTR &&
- instrVal != INTERRUPT_INSTR_CALL &&
- instrVal != INTERRUPT_INSTR_PROTECT_RET) {
- _ASSERTE(instrVal == *(DWORD *)(gcCover->savedCode + offset)); // someone beat us to it.
- return; // Someone beat us to it, just go on running
- }
-
- bool atCall = (instrVal == INTERRUPT_INSTR_CALL);
- bool afterCallProtect = (instrVal == INTERRUPT_INSTR_PROTECT_RET);
+ atCall = (instrVal == INTERRUPT_INSTR_CALL);
+ afterCallProtect[0] = (instrVal == INTERRUPT_INSTR_PROTECT_RET);
#endif // _TARGET_*
@@ -1462,7 +1570,7 @@ void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
}
// If some other thread removes interrupt points, we abandon epilog testing
- // for this routine since the barrier at the begining of the routine may not
+ // for this routine since the barrier at the beginning of the routine may not
// be up anymore, and thus the caller context is now not guaranteed to be correct.
// This should happen only very rarely so is not a big deal.
if (gcCover->callerThread != pThread)
@@ -1550,7 +1658,7 @@ void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) || defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
/* In non-fully interrruptable code, if the EIP is just after a call instr
- means something different because it expects that that we are IN the
+ means something different because it expects that we are IN the
called method, not actually at the instruction just after the call. This
is important, because until the called method returns, IT is responsible
for protecting the return value. Thus just after a call instruction
@@ -1584,13 +1692,13 @@ void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
// but it's not been a problem so far.
// see details about <GCStress instruction update race> in comments above
pThread->CommitGCStressInstructionUpdate ();
- BYTE* nextInstr;
- SLOT target = getTargetOfCall((BYTE*) instrPtr, regs, (BYTE**)&nextInstr);
+ PBYTE nextInstr;
+ PBYTE target = getTargetOfCall((BYTE*) instrPtr, regs, (BYTE**)&nextInstr);
if (target != 0)
{
if (!pThread->PreemptiveGCDisabled())
{
- // We are in preemtive mode in JITTed code. This implies that we are into IL stub
+ // We are in preemptive mode in JITTed code. This implies that we are into IL stub
// close to PINVOKE method. This call will never return objectrefs.
#ifdef _TARGET_ARM_
size_t instrLen = GetARMInstructionLength(nextInstr);
@@ -1614,32 +1722,7 @@ void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
// It could become a problem if 64bit does partially interrupt work.
// OK, we have the MD, mark the instruction after the CALL
// appropriately
-
- ReturnKind returnKind = targetMD->GetReturnKind(true);
- bool protectReturn = IsPointerReturnKind(returnKind);
-#ifdef _TARGET_ARM_
- size_t instrLen = GetARMInstructionLength(nextInstr);
- if (protectReturn)
- if (instrLen == 2)
- *(WORD*)nextInstr = INTERRUPT_INSTR_PROTECT_RET;
- else
- *(DWORD*)nextInstr = INTERRUPT_INSTR_PROTECT_RET_32;
- else
- if (instrLen == 2)
- *(WORD*)nextInstr = INTERRUPT_INSTR;
- else
- *(DWORD*)nextInstr = INTERRUPT_INSTR_32;
-#elif defined(_TARGET_ARM64_)
- if (protectReturn)
- *(DWORD *)nextInstr = INTERRUPT_INSTR_PROTECT_RET;
- else
- *(DWORD *)nextInstr = INTERRUPT_INSTR;
-#else
- if (protectReturn)
- *nextInstr = INTERRUPT_INSTR_PROTECT_RET;
- else
- *nextInstr = INTERRUPT_INSTR;
-#endif
+ ReplaceInstrAfterCall(nextInstr, targetMD);
}
}
}
@@ -1649,7 +1732,7 @@ void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
FlushInstructionCache(GetCurrentProcess(), (LPCVOID)instrPtr, 10);
// It's not GC safe point, the GC Stress instruction is
- // already commited and interrupt is already put at next instruction so we just return.
+ // already committed and interrupt is already put at next instruction so we just return.
return;
}
#else
@@ -1692,26 +1775,38 @@ void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
frame.Push(pThread);
#endif // USE_REDIRECT_FOR_GCSTRESS
-#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) || defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
- FrameWithCookie<GCFrame> gcFrame;
- DWORD_PTR retVal = 0;
- if (afterCallProtect) // Do I need to protect return value?
- {
-#ifdef _TARGET_AMD64_
- retVal = regs->Rax;
+ DWORD_PTR retValRegs[2] = { 0 };
+ UINT numberOfRegs = 0;
+
+ if (afterCallProtect[0])
+ {
+#if defined(_TARGET_AMD64_)
+ retValRegs[numberOfRegs++] = regs->Rax;
#elif defined(_TARGET_X86_)
- retVal = regs->Eax;
-#elif defined(_TARGET_ARM_)
- retVal = regs->R0;
+ retValRegs[numberOfRegs++] = regs->Eax;
+#elif defined(_TARGET_ARM_)
+ retValRegs[numberOfRegs++] = regs->R0;
#elif defined(_TARGET_ARM64_)
- retVal = regs->X0;
-#else
- PORTABILITY_ASSERT("DoGCStress - return register");
-#endif
- gcFrame.Init(pThread, (OBJECTREF*) &retVal, 1, TRUE);
+ retValRegs[numberOfRegs++] = regs->X0;
+#endif // _TARGET_ARM64_
+ }
+
+ if (afterCallProtect[1])
+ {
+#if defined(_TARGET_AMD64_) && defined(PLATFORM_UNIX)
+ retValRegs[numberOfRegs++] = regs->Rdx;
+#else // !_TARGET_AMD64_ || !PLATFORM_UNIX
+ _ASSERTE(!"Not expected multi reg return with pointers.");
+#endif // !_TARGET_AMD64_ || !PLATFORM_UNIX
+ }
+
+ FrameWithCookie<GCFrame> gcFrame;
+ if (numberOfRegs != 0)
+ {
+ _ASSERTE(sizeof(OBJECTREF) == sizeof(DWORD_PTR));
+ gcFrame.Init(pThread, (OBJECTREF*)retValRegs, numberOfRegs, TRUE);
}
-#endif // _TARGET_*
if (gcCover->lastMD != pMD)
{
@@ -1745,23 +1840,34 @@ void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
CONSISTENCY_CHECK(!pThread->HasPendingGCStressInstructionUpdate());
-#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) || defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
- if (afterCallProtect)
+ if (numberOfRegs != 0)
{
-#ifdef _TARGET_AMD64_
- regs->Rax = retVal;
+ if (afterCallProtect[0])
+ {
+#if defined(_TARGET_AMD64_)
+ regs->Rax = retValRegs[0];
#elif defined(_TARGET_X86_)
- regs->Eax = retVal;
+ regs->Eax = retValRegs[0];
#elif defined(_TARGET_ARM_)
- regs->R0 = retVal;
+ regs->R0 = retValRegs[0];
#elif defined(_TARGET_ARM64_)
- regs->X[0] = retVal;
+ regs->X[0] = retValRegs[0];
#else
- PORTABILITY_ASSERT("DoGCStress - return register");
-#endif
+ PORTABILITY_ASSERT("DoGCStress - return register");
+#endif
+ }
+
+ if (afterCallProtect[1])
+ {
+#if defined(_TARGET_AMD64_) && defined(PLATFORM_UNIX)
+ regs->Rdx = retValRegs[numberOfRegs - 1];
+#else // !_TARGET_AMD64_ || !PLATFORM_UNIX
+ _ASSERTE(!"Not expected multi reg return with pointers.");
+#endif // !_TARGET_AMD64_ || !PLATFORM_UNIX
+ }
+
gcFrame.Pop();
}
-#endif // _TARGET_*
#if !defined(USE_REDIRECT_FOR_GCSTRESS)
frame.Pop(pThread);
diff --git a/src/vm/gccover.h b/src/vm/gccover.h
index 7835e8c990..6588975cd0 100644
--- a/src/vm/gccover.h
+++ b/src/vm/gccover.h
@@ -68,9 +68,11 @@ public:
#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
-#define INTERRUPT_INSTR 0xF4 // X86 HLT instruction (any 1 byte illegal instruction will do)
-#define INTERRUPT_INSTR_CALL 0xFA // X86 CLI instruction
-#define INTERRUPT_INSTR_PROTECT_RET 0xFB // X86 STI instruction
+#define INTERRUPT_INSTR 0xF4 // X86 HLT instruction (any 1 byte illegal instruction will do)
+#define INTERRUPT_INSTR_CALL 0xFA // X86 CLI instruction
+#define INTERRUPT_INSTR_PROTECT_FIRST_RET 0xFB // X86 STI instruction, protect the first return register
+#define INTERRUPT_INSTR_PROTECT_SECOND_RET 0xEC // X86 IN instruction, protect the second return register
+#define INTERRUPT_INSTR_PROTECT_BOTH_RET 0xED // X86 IN instruction, protect both return registers
#elif defined(_TARGET_ARM_)
diff --git a/src/vm/method.cpp b/src/vm/method.cpp
index c17901582f..d0f5bfa2b4 100644
--- a/src/vm/method.cpp
+++ b/src/vm/method.cpp
@@ -1284,8 +1284,17 @@ ReturnKind MethodDesc::ParseReturnKindFromSig(INDEBUG(bool supportStringConstruc
if (pReturnTypeMT->ContainsPointers())
{
- _ASSERTE(pReturnTypeMT->GetNumInstanceFieldBytes() == sizeof(void*));
- return RT_Object;
+ if (pReturnTypeMT->GetNumInstanceFields() == 1)
+ {
+ _ASSERTE(pReturnTypeMT->GetNumInstanceFieldBytes() == sizeof(void*));
+ // Note: we can't distinguish RT_Object from RT_ByRef, the caller has to tolerate that.
+ return RT_Object;
+ }
+ else
+ {
+ // Multi-reg return case with pointers; can't restore the actual kind.
+ return RT_Illegal;
+ }
}
}
}
diff --git a/src/vm/method.hpp b/src/vm/method.hpp
index ca2e88fdcc..3122e4e91a 100644
--- a/src/vm/method.hpp
+++ b/src/vm/method.hpp
@@ -1666,6 +1666,12 @@ private:
ReturnKind ParseReturnKindFromSig(INDEBUG(bool supportStringConstructors = false));
public:
+ // This method is used to restore the ReturnKind using the class handle. It is fully supported only on x64 Unix;
+ // other platforms do not support the multi-reg return case with pointers.
+ // Use this method only when you can't hit this case
+ // (like ComPlusMethodFrame::GcScanRoots) or when you can tolerate an RT_Illegal return.
+ // Also, on the other platforms, for the single-field struct return case
+ // the function can't distinguish RT_Object from RT_ByRef.
ReturnKind GetReturnKind(INDEBUG(bool supportStringConstructors = false));
public:
diff --git a/tests/src/JIT/Regression/JitBlue/GitHub_23199/GitHub_23199.cs b/tests/src/JIT/Regression/JitBlue/GitHub_23199/GitHub_23199.cs
new file mode 100644
index 0000000000..5d4983e320
--- /dev/null
+++ b/tests/src/JIT/Regression/JitBlue/GitHub_23199/GitHub_23199.cs
@@ -0,0 +1,201 @@
+using System;
+using System.Diagnostics;
+using System.Runtime.CompilerServices;
+
+// The test revealed some problems in the GCStress infrastructure on platforms with multi-reg returns (arm64, amd64 Unix).
+// It required GCStress=0xc and GcStressOnDirectCalls=1 to hit the issues. The issues were with saving GC pointers held in the return registers.
+// The GC infrastructure has to correctly mark registers with pointers as alive and must not report registers without pointers.
+
+#if BIT32
+using nint = System.Int32;
+#else
+using nint = System.Int64;
+#endif
+
+namespace GitHub_23199
+{
+ public class Program
+ {
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static Object TestCrossgenedReturnWith2PointersStruct()
+ {
+ ProcessStartInfo pi = new ProcessStartInfo();
+ // pi.Environment calls the crossgened HashtableEnumerator::get_Entry, returning the struct that we need.
+ Console.WriteLine(pi.Environment.Count);
+ return pi;
+ }
+
+ struct TwoPointers
+ {
+ public Object a;
+ public Object b;
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static TwoPointers GetTwoPointersStruct()
+ {
+ var a = new TwoPointers();
+ a.a = new String("TestTwoPointers");
+ a.b = new string("Passed");
+ return a;
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static Object TestTwoPointers()
+ {
+ var a = GetTwoPointersStruct(); // Report both.
+ Console.WriteLine(a.a + " " + a.b);
+ return a;
+ }
+
+ struct OnePointer
+ {
+ public Object a;
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static TwoPointers GetOnePointer()
+ {
+ var a = new TwoPointers();
+ a.a = new String("TestOnePointer Passed");
+ return a;
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static Object TestOnePointer()
+ {
+ var a = GetOnePointer(); // Report one.
+ Console.WriteLine(a.a);
+ return a;
+ }
+
+ struct FirstPointer
+ {
+ public Object a;
+ public nint b;
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static FirstPointer GetFirstPointer()
+ {
+ var a = new FirstPointer();
+ a.a = new String("TestFirstPointer Passed");
+ a.b = 100;
+ return a;
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static Object TestFirstPointer()
+ {
+ var a = GetFirstPointer(); // Report the first field, do not report the second.
+ Console.WriteLine(a.a);
+ return a;
+ }
+
+ struct SecondPointer
+ {
+ public nint a;
+ public Object b;
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static SecondPointer GetSecondPointer()
+ {
+ var a = new SecondPointer();
+ a.a = 100;
+ a.b = new String("TestSecondPointer Passed");
+ return a;
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static Object TestSecondPointer()
+ {
+ var a = GetSecondPointer(); // Report the second field, do not report the first.
+ Console.WriteLine(a.b);
+ return a;
+ }
+
+ struct NoPointer1
+ {
+ public nint a;
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static NoPointer1 GetNoPointer1()
+ {
+ var a = new NoPointer1();
+ a.a = 100;
+ return a;
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static Object TestNoPointer1()
+ {
+ var a = GetNoPointer1(); // Do not report anything.
+ Console.WriteLine("TestNoPointer1 Passed");
+ return a;
+ }
+
+ struct NoPointer2
+ {
+ public nint a;
+ public nint b;
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static NoPointer2 GetNoPointer2()
+ {
+ var a = new NoPointer2();
+ a.a = 100;
+ a.b = 100;
+ return a;
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static Object TestNoPointer2()
+ {
+ NoPointer2 a = GetNoPointer2(); // Do not report anything.
+ Console.WriteLine("TestNoPointer2 Passed");
+ return a;
+ }
+
+ struct ThirdPointer
+ {
+ public nint a;
+ public nint b;
+ public Object c;
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static ThirdPointer GetThirdPointer()
+ {
+ var a = new ThirdPointer();
+ a.a = 100;
+ a.b = 100;
+ a.c = new String("TestThirdPointer Passed");
+ return a;
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static Object TestThirdPointer()
+ {
+ ThirdPointer a = GetThirdPointer(); // Not returned in registers, so no return registers to report.
+ Console.WriteLine(a.c);
+ return a;
+ }
+
+
+ static int Main(string[] args)
+ {
+ TestCrossgenedReturnWith2PointersStruct();
+ TestTwoPointers();
+ TestOnePointer();
+ TestFirstPointer();
+ TestSecondPointer();
+ TestNoPointer1();
+ TestNoPointer2();
+ TestThirdPointer();
+ return 100;
+ }
+ }
+}
diff --git a/tests/src/JIT/Regression/JitBlue/GitHub_23199/GitHub_23199.csproj b/tests/src/JIT/Regression/JitBlue/GitHub_23199/GitHub_23199.csproj
new file mode 100644
index 0000000000..273893797e
--- /dev/null
+++ b/tests/src/JIT/Regression/JitBlue/GitHub_23199/GitHub_23199.csproj
@@ -0,0 +1,49 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="12.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <Import Project="$([MSBuild]::GetDirectoryNameOfFileAbove($(MSBuildThisFileDirectory), dir.props))\dir.props" />
+ <PropertyGroup>
+ <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
+ <Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
+ <SchemaVersion>2.0</SchemaVersion>
+ <ProjectGuid>{95DFC527-4DC1-495E-97D7-E94EE1F7140D}</ProjectGuid>
+ <OutputType>Exe</OutputType>
+ <ProjectTypeGuids>{786C830F-07A1-408B-BD7F-6EE04809D6DB};{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}</ProjectTypeGuids>
+ <SolutionDir Condition="$(SolutionDir) == '' Or $(SolutionDir) == '*Undefined*'">..\..\</SolutionDir>
+ <CLRTestPriority>1</CLRTestPriority>
+ </PropertyGroup>
+ <PropertyGroup Condition=" '$(PointerSize)' == '32' ">
+ <DefineConstants>BIT32;$(DefineConstants)</DefineConstants>
+ </PropertyGroup>
+ <!-- Default configurations to help VS understand the configurations -->
+ <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' "></PropertyGroup>
+ <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' " />
+ <ItemGroup>
+ <CodeAnalysisDependentAssemblyPaths Condition=" '$(VS100COMNTOOLS)' != '' " Include="$(VS100COMNTOOLS)..\IDE\PrivateAssemblies">
+ <Visible>False</Visible>
+ </CodeAnalysisDependentAssemblyPaths>
+ </ItemGroup>
+ <PropertyGroup>
+ <DebugType>None</DebugType>
+ <Optimize>True</Optimize>
+ </PropertyGroup>
+ <ItemGroup>
+ <Service Include="{82A7F48D-3B50-4B1E-B82E-3ADA8210C358}" />
+ </ItemGroup>
+ <PropertyGroup>
+ <CLRTestBatchPreCommands><![CDATA[
+$(CLRTestBatchPreCommands)
+set COMPlus_GcStressOnDirectCalls=1
+set COMPlus_GcStress=0xc
+]]></CLRTestBatchPreCommands>
+ <BashCLRTestPreCommands><![CDATA[
+$(BashCLRTestPreCommands)
+export COMPlus_GcStressOnDirectCalls=1
+export COMPlus_GcStress=0xc
+]]></BashCLRTestPreCommands>
+ </PropertyGroup>
+ <ItemGroup>
+ <Compile Include="$(MSBuildProjectName).cs" />
+ </ItemGroup>
+ <Import Project="$([MSBuild]::GetDirectoryNameOfFileAbove($(MSBuildThisFileDirectory), dir.targets))\dir.targets" />
+ <PropertyGroup Condition=" '$(MsBuildProjectDirOverride)' != '' "></PropertyGroup>
+</Project>
diff --git a/tests/src/ilasm/System/Runtime/CompilerServices/MethodImplOptionsTests.csproj b/tests/src/ilasm/System/Runtime/CompilerServices/MethodImplOptionsTests.csproj
index ec2f298707..1d23bc3f4e 100644
--- a/tests/src/ilasm/System/Runtime/CompilerServices/MethodImplOptionsTests.csproj
+++ b/tests/src/ilasm/System/Runtime/CompilerServices/MethodImplOptionsTests.csproj
@@ -6,8 +6,6 @@
<Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
<ProjectGuid>{1AFDD005-7AB9-48E0-B6D4-4DE0560C936D}</ProjectGuid>
<OutputType>Exe</OutputType>
- <!-- https://github.com/dotnet/coreclr/issues/23199 -->
- <GCStressIncompatible>true</GCStressIncompatible>
</PropertyGroup>
<!-- Default configurations to help VS understand the configurations -->
<PropertyGroup Condition="'$(Configuration)|$(Platform)' == 'Debug|x64'">