-rw-r--r--  src/jit/block.h           |  2
-rw-r--r--  src/jit/codegenarm64.cpp  | 88
-rw-r--r--  src/jit/codegencommon.cpp | 39
-rw-r--r--  src/jit/compiler.hpp      |  4
-rw-r--r--  src/jit/emit.cpp          |  1
-rw-r--r--  src/jit/emitarm64.cpp     | 18
-rw-r--r--  src/jit/importer.cpp      |  8
-rw-r--r--  src/jit/instrsarm64.h     |  8
-rw-r--r--  src/jit/lower.cpp         |  2
-rw-r--r--  src/jit/lowerarm64.cpp    |  7
-rw-r--r--  tests/arm64/Tests.lst     | 14
11 files changed, 117 insertions(+), 74 deletions(-)
diff --git a/src/jit/block.h b/src/jit/block.h
index 116b58554b..ed6177bf34 100644
--- a/src/jit/block.h
+++ b/src/jit/block.h
@@ -925,14 +925,12 @@ typedef unsigned weight_t; // Type used to hold block and edge weigh
bool endsWithJmpMethod(Compiler *comp);
-#if FEATURE_FASTTAILCALL
bool endsWithTailCall(Compiler* comp, bool fastTailCallsOnly, bool tailCallsConvertibleToLoopOnly, GenTree** tailCall);
bool endsWithTailCallOrJmp(Compiler *comp,
bool fastTailCallsOnly = false);
bool endsWithTailCallConvertibleToLoop(Compiler *comp, GenTree** tailCall);
-#endif // FEATURE_FASTTAILCALL
#if JIT_FEATURE_SSA_SKIP_DEFS
// Returns the first statement in the statement list of "this" that is
diff --git a/src/jit/codegenarm64.cpp b/src/jit/codegenarm64.cpp
index 031131b56b..06d47ab099 100644
--- a/src/jit/codegenarm64.cpp
+++ b/src/jit/codegenarm64.cpp
@@ -5361,7 +5361,6 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
return;
}
-#if 0
// Make sure register arguments are in their initial registers
// and stack arguments are put back as well.
unsigned varNum;
@@ -5399,7 +5398,7 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
}
else if (varDsc->lvRegNum == REG_STK)
{
- // Skip args which are currently living in stack.
+ // Skip args which are currently living in stack.
continue;
}
@@ -5407,9 +5406,11 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
// a stack argument currently living in a register. In either case the following
// assert should hold.
assert(varDsc->lvRegNum != REG_STK);
+ assert(varDsc->TypeGet() != TYP_STRUCT);
+ var_types storeType = genActualType(varDsc->TypeGet());
+ emitAttr storeSize = emitActualTypeSize(storeType);
- var_types loadType = varDsc->lvaArgType();
- getEmitter()->emitIns_S_R(ins_Store(loadType), emitTypeSize(loadType), varDsc->lvRegNum, varNum, 0);
+ getEmitter()->emitIns_S_R(ins_Store(storeType), storeSize, varDsc->lvRegNum, varNum, 0);
// Update lvRegNum life and GC info to indicate lvRegNum is dead and varDsc stack slot is going live.
// Note that we cannot modify varDsc->lvRegNum here because another basic block may not be expecting it.
@@ -5417,7 +5418,7 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
regMaskTP tempMask = genRegMask(varDsc->lvRegNum);
regSet.RemoveMaskVars(tempMask);
gcInfo.gcMarkRegSetNpt(tempMask);
- if (varDsc->lvTracked)
+ if (compiler->lvaIsGCTracked(varDsc))
{
VarSetOps::AddElemD(compiler, gcInfo.gcVarPtrSetCur, varNum);
}
@@ -5453,14 +5454,24 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
// Is register argument already in the right register?
// If not load it from its stack location.
- var_types loadType = varDsc->lvaArgType();
- regNumber argReg = varDsc->lvArgReg; // incoming arg register
+ regNumber argReg = varDsc->lvArgReg; // incoming arg register
+ regNumber argRegNext = REG_NA;
if (varDsc->lvRegNum != argReg)
{
- assert(genIsValidReg(argReg));
-
- getEmitter()->emitIns_R_S(ins_Load(loadType), emitTypeSize(loadType), argReg, varNum, 0);
+ var_types loadType = TYP_UNDEF;
+ if (varTypeIsStruct(varDsc))
+ {
+ // Must be <= 16 bytes or else it wouldn't be passed in registers
+ noway_assert(EA_SIZE_IN_BYTES(varDsc->lvSize()) <= MAX_PASS_MULTIREG_BYTES);
+ loadType = compiler->getJitGCType(varDsc->lvGcLayout[0]);
+ }
+ else
+ {
+ loadType = compiler->mangleVarArgsType(genActualType(varDsc->TypeGet()));
+ }
+ emitAttr loadSize = emitActualTypeSize(loadType);
+ getEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, argReg, varNum, 0);
// Update argReg life and GC Info to indicate varDsc stack slot is dead and argReg is going live.
// Note that we cannot modify varDsc->lvRegNum here because another basic block may not be expecting it.
@@ -5468,29 +5479,39 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
// and after which reg life and gc info will be recomputed for the new block in genCodeForBBList().
regSet.AddMaskVars(genRegMask(argReg));
gcInfo.gcMarkRegPtrVal(argReg, loadType);
- if (varDsc->lvTracked)
+
+ if (varDsc->lvIsMultiregStruct())
+ {
+ // Restore the next register.
+ argRegNext = genMapRegArgNumToRegNum(genMapRegNumToRegArgNum(argReg, loadType) + 1, loadType);
+ loadType = compiler->getJitGCType(varDsc->lvGcLayout[1]);
+ loadSize = emitActualTypeSize(loadType);
+ getEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, argRegNext, varNum, TARGET_POINTER_SIZE);
+
+ regSet.AddMaskVars(genRegMask(argRegNext));
+ gcInfo.gcMarkRegPtrVal(argRegNext, loadType);
+ }
+
+ if (compiler->lvaIsGCTracked(varDsc))
{
- VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varNum);
+ VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varNum);
}
}
- // In case of a jmp call to a vararg method also pass the float/double arg in the corresponding int arg register.
+ // In case of a jmp call to a vararg method ensure only integer registers are passed.
if (compiler->info.compIsVarArgs)
{
- regNumber intArgReg;
- if (varTypeIsFloating(loadType))
- {
- intArgReg = compiler->getCallArgIntRegister(argReg);
- inst_RV_RV(INS_mov_xmm2i, argReg, intArgReg, loadType);
- }
- else
+ assert((genRegMask(argReg) & RBM_ARG_REGS) != RBM_NONE);
+
+ fixedIntArgMask |= genRegMask(argReg);
+
+ if (varDsc->lvIsMultiregStruct())
{
- intArgReg = argReg;
+ assert(argRegNext != REG_NA);
+ fixedIntArgMask |= genRegMask(argRegNext);
}
- fixedIntArgMask |= genRegMask(intArgReg);
-
- if (intArgReg == REG_ARG_0)
+ if (argReg == REG_ARG_0)
{
assert(firstArgVarNum == BAD_VAR_NUM);
firstArgVarNum = varNum;
@@ -5498,11 +5519,11 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
}
}
- // Jmp call to a vararg method - if the method has fewer than 4 fixed arguments,
- // load the remaining arg registers (both int and float) from the corresponding
+ // Jmp call to a vararg method - if the method has fewer than 8 fixed arguments,
+ // load the remaining integer arg registers from the corresponding
// shadow stack slots. This is for the reason that we don't know the number and type
// of non-fixed params passed by the caller, therefore we have to assume the worst case
- // of caller passing float/double args both in int and float arg regs.
+ // of caller passing all 8 integer arg regs.
//
// The caller could have passed gc-ref/byref type var args. Since these are var args
// the callee has no way of knowing their gc-ness. Therefore, mark the region that loads
@@ -5512,7 +5533,7 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
assert(compiler->info.compIsVarArgs);
assert(firstArgVarNum != BAD_VAR_NUM);
- regMaskTP remainingIntArgMask = RBM_ARG_REGS & ~fixedIntArgMask;
+ regMaskTP remainingIntArgMask = RBM_ARG_REGS & ~fixedIntArgMask;
if (remainingIntArgMask != RBM_NONE)
{
getEmitter()->emitDisableGC();
@@ -5524,21 +5545,14 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
if ((remainingIntArgMask & argRegMask) != 0)
{
remainingIntArgMask &= ~argRegMask;
- getEmitter()->emitIns_R_S(INS_mov, EA_8BYTE, argReg, firstArgVarNum, argOffset);
-
- // also load it in corresponding float arg reg
- regNumber floatReg = compiler->getCallArgFloatRegister(argReg);
- inst_RV_RV(INS_mov_i2xmm, floatReg, argReg);
+ getEmitter()->emitIns_R_S(INS_ldr, EA_8BYTE, argReg, firstArgVarNum, argOffset);
}
argOffset += REGSIZE_BYTES;
- }
+ }
getEmitter()->emitEnableGC();
}
}
-#else // !0
- NYI("genJmpMethod");
-#endif // !0
}
// produce code for a GT_LEA subnode
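The following standalone C++ sketch models the vararg fix-up in the genJmpMethod() hunk above: every integer argument register not already holding a fixed argument is reloaded from its shadow stack slot before the jump. The mask constants, register numbering and the printf "emitter" are illustrative stand-ins, not the JIT's regMaskTP/emitIns_R_S interfaces; only the control flow mirrors the diff.

    #include <cstdint>
    #include <cstdio>

    static const int      NUM_ARG_REGS  = 8;      // x0..x7 on ARM64
    static const uint32_t RBM_ARG_REGS  = 0xFF;   // bit i == register xi (stand-in mask)
    static const int      REGSIZE_BYTES = 8;

    // Pretend-emit the loads that refill every integer argument register that is
    // not already holding a fixed argument. 'fixedIntArgMask' is built while the
    // fixed register args are moved back into place (see the hunk above).
    static void reloadRemainingVarargRegs(uint32_t fixedIntArgMask, int firstArgOffset)
    {
        uint32_t remainingIntArgMask = RBM_ARG_REGS & ~fixedIntArgMask;
        int      argOffset           = firstArgOffset;

        for (int i = 0; i < NUM_ARG_REGS; i++)
        {
            uint32_t argRegMask = 1u << i;
            if ((remainingIntArgMask & argRegMask) != 0)
            {
                remainingIntArgMask &= ~argRegMask;
                // The real code emits this load via emitIns_R_S(INS_ldr, EA_8BYTE, ...).
                printf("ldr x%d, [fp, #%d]\n", i, argOffset);
            }
            argOffset += REGSIZE_BYTES;
        }
    }

    int main()
    {
        // Suppose x0 and x1 already carry fixed arguments; x2..x7 must be reloaded
        // from the shadow slots before jumping to the vararg target.
        reloadRemainingVarargRegs(0x03, 0);
        return 0;
    }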
diff --git a/src/jit/codegencommon.cpp b/src/jit/codegencommon.cpp
index 0a1098ba5c..186ec9b4c5 100644
--- a/src/jit/codegencommon.cpp
+++ b/src/jit/codegencommon.cpp
@@ -3882,6 +3882,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
if (verbose)
printf("*************** In genFnPrologCalleeRegArgs() for %s regs\n", regState->rsIsFloat ? "float" : "int");
#endif
+
#ifdef _TARGET_ARM64_
if (compiler->info.compIsVarArgs)
{
@@ -9492,7 +9493,43 @@ void CodeGen::genFnEpilog(BasicBlock* block)
{
noway_assert(block->bbJumpKind == BBJ_RETURN);
noway_assert(block->bbTreeList != nullptr);
- NYI("jmp call");
+
+ // figure out what jump we have
+ GenTreePtr jmpStmt = block->lastTopLevelStmt();
+ noway_assert(jmpStmt && (jmpStmt->gtOper == GT_STMT));
+
+ noway_assert(jmpStmt->gtNext == nullptr);
+ GenTreePtr jmpNode = jmpStmt->gtStmt.gtStmtExpr;
+ noway_assert(jmpNode->gtOper == GT_JMP);
+
+ {
+ // Simply emit a jump to the methodHnd. This is similar to a call so we can use
+ // the same descriptor with some minor adjustments.
+ CORINFO_METHOD_HANDLE methHnd = (CORINFO_METHOD_HANDLE)jmpNode->gtVal.gtVal1;
+
+ CORINFO_CONST_LOOKUP addrInfo;
+ compiler->info.compCompHnd->getFunctionEntryPoint(methHnd, &addrInfo);
+ if (addrInfo.accessType != IAT_VALUE)
+ {
+ NYI_ARM64("Unsupported JMP indirection");
+ }
+
+ emitter::EmitCallType callType = emitter::EC_FUNC_TOKEN;
+
+ // Simply emit a jump to the methodHnd. This is similar to a call so we can use
+ // the same descriptor with some minor adjustments.
+ getEmitter()->emitIns_Call(callType,
+ methHnd,
+ INDEBUG_LDISASM_COMMA(nullptr)
+ addrInfo.addr,
+ 0, // argSize
+ EA_UNKNOWN, // retSize
+ gcInfo.gcVarPtrSetCur,
+ gcInfo.gcRegGCrefSetCur,
+ gcInfo.gcRegByrefSetCur,
+ BAD_IL_OFFSET, REG_NA, REG_NA, 0, 0, /* iloffset, ireg, xreg, xmul, disp */
+ true); /* isJump */
+ }
}
else
{
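To make the control flow of the new GT_JMP epilog path above concrete, here is a minimal, compilable sketch: only a direct entry point (IAT_VALUE) is handled, anything needing an indirection is still rejected, and then a tail branch is emitted. The ConstLookup struct and emitTailBranch() are placeholders for CORINFO_CONST_LOOKUP and emitter::emitIns_Call(..., isJump = true), not the real interfaces.

    #include <cassert>
    #include <cstdio>

    enum AccessType { IAT_VALUE, IAT_PVALUE, IAT_PPVALUE };

    struct ConstLookup
    {
        AccessType accessType;
        void*      addr;
    };

    static void emitTailBranch(void* targetAddr)
    {
        // The real code calls getEmitter()->emitIns_Call(EC_FUNC_TOKEN, ..., isJump = true),
        // which on ARM64 becomes the INS_b_tail pseudo-instruction (an unconditional b).
        printf("b <%p>\n", targetAddr);
    }

    static void genJmpEpilog(const ConstLookup& addrInfo)
    {
        // Only a direct entry-point address is handled; anything requiring an
        // indirection cell is still NYI in this change.
        assert(addrInfo.accessType == IAT_VALUE);
        emitTailBranch(addrInfo.addr);
    }

    int main()
    {
        ConstLookup info = { IAT_VALUE, (void*)0x1000 };
        genJmpEpilog(info);
        return 0;
    }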
diff --git a/src/jit/compiler.hpp b/src/jit/compiler.hpp
index ec9cba33cb..cdfeebeb5f 100644
--- a/src/jit/compiler.hpp
+++ b/src/jit/compiler.hpp
@@ -4675,8 +4675,6 @@ inline bool BasicBlock::endsWithJmpMethod(Compiler *comp)
return false;
}
-#if FEATURE_FASTTAILCALL
-
// Returns true if the basic block ends with either
// i) GT_JMP or
// ii) tail call (implicit or explicit)
@@ -4784,8 +4782,6 @@ inline bool BasicBlock::endsWithTailCallConvertibleToLoop(Compiler* comp, GenTre
return endsWithTailCall(comp, fastTailCallsOnly, tailCallsConvertibleToLoopOnly, tailCall);
}
-#endif // FEATURE_FASTTAILCALL
-
// Returns the last top level stmt of a given basic block.
// Returns nullptr if the block is empty.
inline GenTreePtr Compiler::fgGetLastTopLevelStmt(BasicBlock *block)
diff --git a/src/jit/emit.cpp b/src/jit/emit.cpp
index a4a20693a4..8c2c32146f 100644
--- a/src/jit/emit.cpp
+++ b/src/jit/emit.cpp
@@ -1166,7 +1166,6 @@ void emitter::dispIns(instrDesc* id)
#if EMIT_TRACK_STACK_DEPTH
assert((int)emitCurStackLvl >= 0);
#endif
-
size_t sz = emitSizeOfInsDsc(id);
assert(id->idDebugOnlyInfo()->idSize == sz);
#endif // DEBUG
diff --git a/src/jit/emitarm64.cpp b/src/jit/emitarm64.cpp
index 2a0bf954b6..33843d37d6 100644
--- a/src/jit/emitarm64.cpp
+++ b/src/jit/emitarm64.cpp
@@ -80,11 +80,11 @@ size_t emitter::emitSizeOfInsDsc(instrDesc *id)
assert((unsigned)id->idInsFmt() < emitFmtCount);
ID_OPS idOp = (ID_OPS) emitFmtToOps[id->idInsFmt()];
- bool isCallIns = (id->idIns() == INS_bl) || (id->idIns() == INS_blr);
- bool maybeCallIns = (id->idIns() == INS_b) || (id->idIns() == INS_br);
-
- // A call instruction (ID_OP_CALL) may use a "fat" call descriptor
- // A local call to a label (i.e. call to a finally) cannot use a fat" call descriptor
+ bool isCallIns = (id->idIns() == INS_bl)
+ || (id->idIns() == INS_blr)
+ || (id->idIns() == INS_b_tail)
+ || (id->idIns() == INS_br_tail);
+ bool maybeCallIns = (id->idIns() == INS_b) || (id->idIns() == INS_br);
switch (idOp)
{
@@ -6888,7 +6888,7 @@ void emitter::emitIns_Call(EmitCallType callType,
if (isJump)
{
- ins = INS_br; // INS_br Reg
+ ins = INS_br_tail; // INS_br_tail Reg
}
else
{
@@ -6918,14 +6918,13 @@ void emitter::emitIns_Call(EmitCallType callType,
if (isJump)
{
- ins = INS_b; // INS_b imm28
- fmt = IF_BI_0A;
+ ins = INS_b_tail; // INS_b_tail imm28
}
else
{
ins = INS_bl; // INS_bl imm28
- fmt = IF_BI_0C;
}
+ fmt = IF_BI_0C;
id->idIns(ins);
id->idInsFmt(fmt);
@@ -8246,7 +8245,6 @@ size_t emitter::emitOutputInstr(insGroup *ig,
case IF_BI_0B: // BI_0B ......iiiiiiiiii iiiiiiiiiii..... simm19:00
assert(id->idGCref() == GCT_NONE);
assert(id->idIsBound());
-
dst = emitOutputLJ(ig, dst, id);
sz = sizeof(instrDescJmp);
break;
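The emitarm64.cpp hunks above introduce two tail-call pseudo-instructions; the sketch below summarizes the selection and classification logic in plain C++. The instruction names mirror instrsarm64.h, while the helper functions are illustrative rather than the emitter's real API.

    #include <cstdio>

    enum instruction { INS_b, INS_bl, INS_br, INS_blr, INS_b_tail, INS_br_tail };

    // Direct calls/jumps take an immediate target; indirect ones take a register.
    static instruction selectBranchIns(bool isJump, bool indirect)
    {
        if (indirect)
            return isJump ? INS_br_tail : INS_blr;   // br Rn  vs. blr Rn
        else
            return isJump ? INS_b_tail : INS_bl;     // b imm26 vs. bl imm26
    }

    // The tail-call pseudo-instructions are treated as calls, so they get a full
    // ("fat") call descriptor and GC/liveness reporting, unlike a plain b/br.
    static bool isCallIns(instruction ins)
    {
        return (ins == INS_bl) || (ins == INS_blr)
            || (ins == INS_b_tail) || (ins == INS_br_tail);
    }

    int main()
    {
        instruction ins = selectBranchIns(/* isJump */ true, /* indirect */ false);
        printf("tail jump uses a call-style descriptor: %s\n", isCallIns(ins) ? "yes" : "no");
        return 0;
    }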
diff --git a/src/jit/importer.cpp b/src/jit/importer.cpp
index 7c24aa87e4..36dc33ba7b 100644
--- a/src/jit/importer.cpp
+++ b/src/jit/importer.cpp
@@ -9358,7 +9358,7 @@ RET:
break;
case CEE_JMP:
-
+
assert(!compIsForInlining());
if (tiVerificationNeeded)
@@ -9392,7 +9392,7 @@ RET:
BADCODE("Incompatible target for CEE_JMPs");
}
-#if defined(_TARGET_XARCH_) || defined(_TARGET_ARM_)
+#if defined(_TARGET_XARCH_) || defined(_TARGET_ARMARCH_)
op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t) resolvedToken.hMethod);
@@ -9409,7 +9409,7 @@ RET:
goto APPEND;
-#else // !_TARGET_X86_ && !_TARGET_ARM_
+#else // !_TARGET_XARCH_ && !_TARGET_ARMARCH_
// Import this just like a series of LDARGs + tail. + call + ret
@@ -9446,7 +9446,7 @@ RET:
// And finish with the ret
goto RET;
-#endif // _TARGET_XXX
+#endif // _TARGET_XARCH_ || _TARGET_ARMARCH_
case CEE_LDELEMA :
assertImp(sz == sizeof(unsigned));
diff --git a/src/jit/instrsarm64.h b/src/jit/instrsarm64.h
index 51ec30e0db..7988fb11f6 100644
--- a/src/jit/instrsarm64.h
+++ b/src/jit/instrsarm64.h
@@ -590,7 +590,10 @@ INST1(adrp, "adrp", 0, 0, IF_DI_1E, 0x90000000)
INST1(b, "b", 0, 0, IF_BI_0A, 0x14000000)
// b simm26 BI_0A 000101iiiiiiiiii iiiiiiiiiiiiiiii 1400 0000 simm26:00
-
+
+INST1(b_tail, "b", 0, 0, IF_BI_0C, 0x14000000)
+ // b simm26 BI_0A 000101iiiiiiiiii iiiiiiiiiiiiiiii 1400 0000 simm26:00, same as b representing a tail call of bl.
+
INST1(bl_local,"bl", 0, 0, IF_BI_0A, 0x94000000)
// bl simm26 BI_0A 100101iiiiiiiiii iiiiiiiiiiiiiiii 9400 0000 simm26:00, same as bl, but with a BasicBlock target.
@@ -600,6 +603,9 @@ INST1(bl, "bl", 0, 0, IF_BI_0C, 0x94000000)
INST1(br, "br", 0, 0, IF_BR_1B, 0xD61F0000)
// br Rn BR_1B 1101011000011111 000000nnnnn00000 D61F 0000
+INST1(br_tail, "br", 0, 0, IF_BR_1B, 0xD61F0000)
+ // br Rn BR_1B 1101011000011111 000000nnnnn00000 D61F 0000, same as br representing a tail call of blr.
+
INST1(blr, "blr", 0, 0, IF_BR_1B, 0xD63F0000)
// blr Rn BR_1B 1101011000111111 000000nnnnn00000 D63F 0000
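The encoding comments in the table above show why INS_b_tail can reuse the b opcode: b, bl and b_tail all take the same word-scaled 26-bit PC-relative immediate, and only bit 31 distinguishes b (0x14000000) from bl (0x94000000). A small, self-contained encoder sketch (not the JIT's emitOutput code) makes that explicit:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // offsetBytes is the byte distance from the branch instruction to its target.
    static uint32_t encodeBranch26(bool withLink, int64_t offsetBytes)
    {
        assert((offsetBytes & 3) == 0);                      // targets are word aligned
        int64_t simm26 = offsetBytes >> 2;
        assert(simm26 >= -(1 << 25) && simm26 < (1 << 25));  // must fit in simm26

        uint32_t opcode = withLink ? 0x94000000u : 0x14000000u;  // bl vs. b / b_tail
        return opcode | (static_cast<uint32_t>(simm26) & 0x03FFFFFFu);
    }

    int main()
    {
        printf("b  +8 -> 0x%08X\n", encodeBranch26(false, 8));  // 0x14000002
        printf("bl +8 -> 0x%08X\n", encodeBranch26(true, 8));   // 0x94000002
        return 0;
    }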
diff --git a/src/jit/lower.cpp b/src/jit/lower.cpp
index c09eff4737..9058758183 100644
--- a/src/jit/lower.cpp
+++ b/src/jit/lower.cpp
@@ -2672,9 +2672,7 @@ void Lowering::InsertPInvokeMethodEpilog(BasicBlock *returnBB
// Method doing Pinvoke calls has exactly one return block unless it has "jmp" or tail calls.
#ifdef DEBUG
bool endsWithTailCallOrJmp = false;
-#if FEATURE_FASTTAILCALL
endsWithTailCallOrJmp = returnBB->endsWithTailCallOrJmp(comp);
-#endif // FEATURE_FASTTAILCALL
assert(((returnBB == comp->genReturnBB) && (returnBB->bbJumpKind == BBJ_RETURN)) || endsWithTailCallOrJmp);
#endif // DEBUG
diff --git a/src/jit/lowerarm64.cpp b/src/jit/lowerarm64.cpp
index 5c53e253c8..82a6e3c12e 100644
--- a/src/jit/lowerarm64.cpp
+++ b/src/jit/lowerarm64.cpp
@@ -722,17 +722,14 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
// If it is a fast tail call, it is already preferenced to use IP0.
// Therefore, no need set src candidates on call tgt again.
- if (tree->gtCall.IsVarargs() &&
- callHasFloatRegArgs &&
+ if (tree->gtCall.IsVarargs() &&
+ callHasFloatRegArgs &&
!tree->gtCall.IsFastTailCall() &&
(ctrlExpr != nullptr))
{
// Don't assign the call target to any of the argument registers because
// we will use them to also pass floating point arguments as required
// by Arm64 ABI.
-
- NYI_ARM64("Lower - IsVarargs");
-
ctrlExpr->gtLsraInfo.setSrcCandidates(l, l->allRegs(TYP_INT) & ~(RBM_ARG_REGS));
}
}
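For the lowerarm64.cpp change above, the register-candidate computation that replaces the NYI_ARM64 call can be sketched in isolation as follows; the mask constants are illustrative stand-ins for allRegs(TYP_INT) and RBM_ARG_REGS, not the LSRA types.

    #include <cstdint>
    #include <cstdio>

    static const uint32_t RBM_ALL_INT  = 0x7FFFFFFF;  // stand-in for allRegs(TYP_INT)
    static const uint32_t RBM_ARG_REGS = 0x000000FF;  // x0..x7

    int main()
    {
        // For a varargs call with float args, the call target must not live in x0..x7,
        // because those registers are also used to pass the floating-point arguments
        // as the ARM64 varargs ABI requires.
        uint32_t ctrlExprCandidates = RBM_ALL_INT & ~RBM_ARG_REGS;
        printf("call-target candidates: 0x%08X\n", ctrlExprCandidates);
        return 0;
    }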
diff --git a/tests/arm64/Tests.lst b/tests/arm64/Tests.lst
index 028300eedc..8a43f8b779 100644
--- a/tests/arm64/Tests.lst
+++ b/tests/arm64/Tests.lst
@@ -11659,7 +11659,7 @@ RelativePath=JIT\jit64\mcc\interop\mcc_i03\mcc_i03.cmd
WorkingDir=JIT\jit64\mcc\interop\mcc_i03
Expected=0
MaxAllowedDurationSeconds=600
-Categories=Pri0;EXPECTED_FAIL
+Categories=Pri0;EXPECTED_PASS
HostStyle=0
[mcc_i04.cmd_1696]
RelativePath=JIT\jit64\mcc\interop\mcc_i04\mcc_i04.cmd
@@ -11715,7 +11715,7 @@ RelativePath=JIT\jit64\mcc\interop\mcc_i13\mcc_i13.cmd
WorkingDir=JIT\jit64\mcc\interop\mcc_i13
Expected=0
MaxAllowedDurationSeconds=600
-Categories=Pri0;EXPECTED_FAIL;ISSUE_2989
+Categories=Pri0;EXPECTED_PASS
HostStyle=0
[mcc_i14.cmd_1704]
RelativePath=JIT\jit64\mcc\interop\mcc_i14\mcc_i14.cmd
@@ -11771,7 +11771,7 @@ RelativePath=JIT\jit64\mcc\interop\mcc_i33\mcc_i33.cmd
WorkingDir=JIT\jit64\mcc\interop\mcc_i33
Expected=0
MaxAllowedDurationSeconds=600
-Categories=Pri0;EXPECTED_FAIL;ISSUE_2989
+Categories=Pri0;EXPECTED_PASS
HostStyle=0
[mcc_i34.cmd_1712]
RelativePath=JIT\jit64\mcc\interop\mcc_i34\mcc_i34.cmd
@@ -11827,7 +11827,7 @@ RelativePath=JIT\jit64\mcc\interop\mcc_i53\mcc_i53.cmd
WorkingDir=JIT\jit64\mcc\interop\mcc_i53
Expected=0
MaxAllowedDurationSeconds=600
-Categories=Pri0;EXPECTED_FAIL;ISSUE_2989
+Categories=Pri0;EXPECTED_PASS
HostStyle=0
[mcc_i54.cmd_1720]
RelativePath=JIT\jit64\mcc\interop\mcc_i54\mcc_i54.cmd
@@ -11883,7 +11883,7 @@ RelativePath=JIT\jit64\mcc\interop\mcc_i63\mcc_i63.cmd
WorkingDir=JIT\jit64\mcc\interop\mcc_i63
Expected=0
MaxAllowedDurationSeconds=600
-Categories=Pri0;EXPECTED_FAIL;ISSUE_2989
+Categories=Pri0;EXPECTED_PASS
HostStyle=0
[mcc_i64.cmd_1728]
RelativePath=JIT\jit64\mcc\interop\mcc_i64\mcc_i64.cmd
@@ -11939,7 +11939,7 @@ RelativePath=JIT\jit64\mcc\interop\mcc_i73\mcc_i73.cmd
WorkingDir=JIT\jit64\mcc\interop\mcc_i73
Expected=0
MaxAllowedDurationSeconds=600
-Categories=Pri0;EXPECTED_FAIL;ISSUE_2989
+Categories=Pri0;EXPECTED_PASS
HostStyle=0
[mcc_i74.cmd_1736]
RelativePath=JIT\jit64\mcc\interop\mcc_i74\mcc_i74.cmd
@@ -11995,7 +11995,7 @@ RelativePath=JIT\jit64\mcc\interop\mcc_i83\mcc_i83.cmd
WorkingDir=JIT\jit64\mcc\interop\mcc_i83
Expected=0
MaxAllowedDurationSeconds=600
-Categories=Pri0;EXPECTED_FAIL;ISSUE_2989
+Categories=Pri0;EXPECTED_PASS
HostStyle=0
[mcc_i84.cmd_1744]
RelativePath=JIT\jit64\mcc\interop\mcc_i84\mcc_i84.cmd