diff options
author | Brian Sullivan <briansul@microsoft.com> | 2019-01-10 16:29:42 -0800 |
---|---|---|
committer | GitHub <noreply@github.com> | 2019-01-10 16:29:42 -0800 |
commit | 459b58a7766707fb059a5762c7d72cb0af42a6ff (patch) | |
tree | d6023ccf22ceab28229b1c26d151a62d96c9db44 | |
parent | a2e33937c7cbff1e5eebb7848d8ce7c039812749 (diff) | |
parent | 12cfc7fbc6f7f44ba4f32fdfe9168057fc8da5ea (diff) | |
download | coreclr-459b58a7766707fb059a5762c7d72cb0af42a6ff.tar.gz coreclr-459b58a7766707fb059a5762c7d72cb0af42a6ff.tar.bz2 coreclr-459b58a7766707fb059a5762c7d72cb0af42a6ff.zip |
Merge pull request #17733 from mikedn/cc-cond2
Expand GT_JCC/SETCC condition support
-rw-r--r-- | src/jit/codegen.h | 63 | ||||
-rw-r--r-- | src/jit/codegenarm.cpp | 77 | ||||
-rw-r--r-- | src/jit/codegenarm64.cpp | 88 | ||||
-rw-r--r-- | src/jit/codegenarmarch.cpp | 259 | ||||
-rw-r--r-- | src/jit/codegencommon.cpp | 156 | ||||
-rw-r--r-- | src/jit/codegenlinear.cpp | 78 | ||||
-rw-r--r-- | src/jit/codegenxarch.cpp | 445 | ||||
-rw-r--r-- | src/jit/emitjmps.h | 4 | ||||
-rw-r--r-- | src/jit/emitxarch.cpp | 8 | ||||
-rw-r--r-- | src/jit/gentree.cpp | 4 | ||||
-rw-r--r-- | src/jit/gentree.h | 235 | ||||
-rw-r--r-- | src/jit/hwintrinsiccodegenxarch.cpp | 8 | ||||
-rw-r--r-- | src/jit/instr.cpp | 8 | ||||
-rw-r--r-- | src/jit/instrsxarch.h | 16 | ||||
-rw-r--r-- | src/jit/lower.cpp | 28 | ||||
-rw-r--r-- | src/jit/lowerxarch.cpp | 20 |
16 files changed, 562 insertions, 935 deletions
diff --git a/src/jit/codegen.h b/src/jit/codegen.h index 0a6f262a02..40042ba71f 100644 --- a/src/jit/codegen.h +++ b/src/jit/codegen.h @@ -92,23 +92,6 @@ private: } } - enum CompareKind - { - CK_SIGNED, - CK_UNSIGNED, - CK_LOGICAL - }; - static emitJumpKind genJumpKindForOper(genTreeOps cmp, CompareKind compareKind); - - // For a given compare oper tree, returns the conditions to use with jmp/set in 'jmpKind' array. - // The corresponding elements of jmpToTrueLabel indicate whether the target of the jump is to the - // 'true' label or a 'false' label. - // - // 'true' label corresponds to jump target of the current basic block i.e. the target to - // branch to on compare condition being true. 'false' label corresponds to the target to - // branch to on condition being false. - static void genJumpKindsForTree(GenTree* cmpTree, emitJumpKind jmpKind[2], bool jmpToTrueLabel[2]); - static bool genShouldRoundFP(); GenTreeIndir indirForm(var_types type, GenTree* base); @@ -1173,7 +1156,7 @@ protected: void genCallInstruction(GenTreeCall* call); void genJmpMethod(GenTree* jmp); BasicBlock* genCallFinally(BasicBlock* block); - void genCodeForJumpTrue(GenTree* tree); + void genCodeForJumpTrue(GenTreeOp* jtrue); #ifdef _TARGET_ARM64_ void genCodeForJumpCompare(GenTreeOp* tree); #endif // _TARGET_ARM64_ @@ -1393,6 +1376,50 @@ public: #ifdef _TARGET_XARCH_ instruction genMapShiftInsToShiftByConstantIns(instruction ins, int shiftByValue); #endif // _TARGET_XARCH_ + + // Maps a GenCondition code to a sequence of conditional jumps or other conditional instructions + // such as X86's SETcc. A sequence of instructions rather than just a single one is required for + // certain floating point conditions. + // For example, X86's UCOMISS sets ZF to indicate equality but it also sets it, together with PF, + // to indicate an unordered result. 
So for GenCondition::FEQ we first need to check if PF is 0 + // and then jump if ZF is 1: + // JP fallThroughBlock + // JE jumpDestBlock + // fallThroughBlock: + // ... + // jumpDestBlock: + // + // This is very similar to the way shortcircuit evaluation of bool AND and OR operators works so + // in order to make the GenConditionDesc mapping tables easier to read, a bool expression-like + // pattern is used to encode the above: + // { EJ_jnp, GT_AND, EJ_je } + // { EJ_jp, GT_OR, EJ_jne } + // + // For more details check inst_JCC and inst_SETCC functions. + // + struct GenConditionDesc + { + emitJumpKind jumpKind1; + genTreeOps oper; + emitJumpKind jumpKind2; + char padTo4Bytes; + + static const GenConditionDesc& Get(GenCondition condition) + { + assert(condition.GetCode() < _countof(map)); + const GenConditionDesc& desc = map[condition.GetCode()]; + assert(desc.jumpKind1 != EJ_NONE); + assert((desc.oper == GT_NONE) || (desc.oper == GT_AND) || (desc.oper == GT_OR)); + assert((desc.oper == GT_NONE) == (desc.jumpKind2 == EJ_NONE)); + return desc; + } + + private: + static const GenConditionDesc map[32]; + }; + + void inst_JCC(GenCondition condition, BasicBlock* target); + void inst_SETCC(GenCondition condition, var_types type, regNumber dstReg); }; /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX diff --git a/src/jit/codegenarm.cpp b/src/jit/codegenarm.cpp index 21267a69c6..151704b3fa 100644 --- a/src/jit/codegenarm.cpp +++ b/src/jit/codegenarm.cpp @@ -301,8 +301,7 @@ void CodeGen::genLclHeap(GenTree* tree) genConsumeRegAndCopy(size, regCnt); endLabel = genCreateTempLabel(); getEmitter()->emitIns_R_R(INS_TEST, easz, regCnt, regCnt); - emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED); - inst_JMP(jmpEqual, endLabel); + inst_JMP(EJ_eq, endLabel); } stackAdjustment = 0; @@ -383,8 +382,7 @@ void CodeGen::genLclHeap(GenTree* tree) // Note that regCnt is the number of bytes to stack allocate. 
assert(genIsValidIntReg(regCnt)); getEmitter()->emitIns_R_I(INS_sub, EA_PTRSIZE, regCnt, STACK_ALIGN, INS_FLAGS_SET); - emitJumpKind jmpNotEqual = genJumpKindForOper(GT_NE, CK_SIGNED); - inst_JMP(jmpNotEqual, loop); + inst_JMP(EJ_ne, loop); } else { @@ -442,8 +440,7 @@ void CodeGen::genLclHeap(GenTree* tree) getEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, regTmp, REG_SPBASE, compiler->eeGetPageSize()); getEmitter()->emitIns_R_R(INS_cmp, EA_PTRSIZE, regTmp, regCnt); - emitJumpKind jmpLTU = genJumpKindForOper(GT_LT, CK_UNSIGNED); - inst_JMP(jmpLTU, done); + inst_JMP(EJ_lo, done); // Update SP to be at the next page of stack that we will tickle getEmitter()->emitIns_R_R(INS_mov, EA_PTRSIZE, REG_SPBASE, regTmp); @@ -1137,7 +1134,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) // Are we evaluating this into a register? if (targetReg != REG_NA) { - genSetRegToCond(targetReg, tree); + inst_SETCC(GenCondition::FromRelop(tree), tree->TypeGet(), targetReg); genProduceReg(tree); } } @@ -1163,8 +1160,7 @@ void CodeGen::genCodeForReturnTrap(GenTreeOp* tree) BasicBlock* skipLabel = genCreateTempLabel(); - emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED); - inst_JMP(jmpEqual, skipLabel); + inst_JMP(EJ_eq, skipLabel); // emit the call to the EE-helper that stops for GC (or other reasons) @@ -1237,54 +1233,6 @@ void CodeGen::genCodeForStoreInd(GenTreeStoreInd* tree) } } -//------------------------------------------------------------------------ -// genSetRegToCond: Generate code to materialize a condition into a register. -// -// Arguments: -// dstReg - The target register to set to 1 or 0 -// tree - The GenTree Relop node that was used to set the Condition codes -// -// Return Value: none -// -// Preconditions: -// The condition codes must already have been appropriately set. -// -void CodeGen::genSetRegToCond(regNumber dstReg, GenTree* tree) -{ - // Emit code like that: - // ... 
- // beq True - // bvs True ; this second branch is typically absent - // movs rD, #0 - // b Next - // True: - // movs rD, #1 - // Next: - // ... - - emitJumpKind jumpKind[2]; - bool branchToTrueLabel[2]; - genJumpKindsForTree(tree, jumpKind, branchToTrueLabel); - - BasicBlock* labelTrue = genCreateTempLabel(); - getEmitter()->emitIns_J(emitter::emitJumpKindToIns(jumpKind[0]), labelTrue); - - if (jumpKind[1] != EJ_NONE) - { - getEmitter()->emitIns_J(emitter::emitJumpKindToIns(jumpKind[1]), labelTrue); - } - - getEmitter()->emitIns_R_I(INS_mov, emitActualTypeSize(tree->gtType), dstReg, 0); - - BasicBlock* labelNext = genCreateTempLabel(); - getEmitter()->emitIns_J(INS_b, labelNext); - - genDefineTempLabel(labelTrue); - getEmitter()->emitIns_R_I(INS_mov, emitActualTypeSize(tree->gtType), dstReg, 1); - genDefineTempLabel(labelNext); -} - -//------------------------------------------------------------------------ // genLongToIntCast: Generate code for long to int casts. // // Arguments: @@ -1336,17 +1284,14 @@ void CodeGen::genLongToIntCast(GenTree* cast) BasicBlock* success = genCreateTempLabel(); inst_RV_RV(INS_tst, loSrcReg, loSrcReg, TYP_INT, EA_4BYTE); - emitJumpKind JmpNegative = genJumpKindForOper(GT_LT, CK_LOGICAL); - inst_JMP(JmpNegative, allOne); + inst_JMP(EJ_mi, allOne); inst_RV_RV(INS_tst, hiSrcReg, hiSrcReg, TYP_INT, EA_4BYTE); - emitJumpKind jmpNotEqualL = genJumpKindForOper(GT_NE, CK_LOGICAL); - genJumpToThrowHlpBlk(jmpNotEqualL, SCK_OVERFLOW); + genJumpToThrowHlpBlk(EJ_ne, SCK_OVERFLOW); inst_JMP(EJ_jmp, success); genDefineTempLabel(allOne); inst_RV_IV(INS_cmp, hiSrcReg, -1, EA_4BYTE); - emitJumpKind jmpNotEqualS = genJumpKindForOper(GT_NE, CK_SIGNED); - genJumpToThrowHlpBlk(jmpNotEqualS, SCK_OVERFLOW); + genJumpToThrowHlpBlk(EJ_ne, SCK_OVERFLOW); genDefineTempLabel(success); } @@ -1355,13 +1300,11 @@ void CodeGen::genLongToIntCast(GenTree* cast) if ((srcType == TYP_ULONG) && (dstType == TYP_INT)) { inst_RV_RV(INS_tst, loSrcReg, loSrcReg, TYP_INT, 
EA_4BYTE); - emitJumpKind JmpNegative = genJumpKindForOper(GT_LT, CK_LOGICAL); - genJumpToThrowHlpBlk(JmpNegative, SCK_OVERFLOW); + genJumpToThrowHlpBlk(EJ_mi, SCK_OVERFLOW); } inst_RV_RV(INS_tst, hiSrcReg, hiSrcReg, TYP_INT, EA_4BYTE); - emitJumpKind jmpNotEqual = genJumpKindForOper(GT_NE, CK_LOGICAL); - genJumpToThrowHlpBlk(jmpNotEqual, SCK_OVERFLOW); + genJumpToThrowHlpBlk(EJ_ne, SCK_OVERFLOW); } } diff --git a/src/jit/codegenarm64.cpp b/src/jit/codegenarm64.cpp index 607153f04f..2baca7cfb9 100644 --- a/src/jit/codegenarm64.cpp +++ b/src/jit/codegenarm64.cpp @@ -1919,8 +1919,7 @@ void CodeGen::genLclHeap(GenTree* tree) genConsumeRegAndCopy(size, targetReg); endLabel = genCreateTempLabel(); getEmitter()->emitIns_R_R(INS_tst, easz, targetReg, targetReg); - emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED); - inst_JMP(jmpEqual, endLabel); + inst_JMP(EJ_eq, endLabel); // Compute the size of the block to allocate and perform alignment. // If compInitMem=true, we can reuse targetReg as regcnt, @@ -2040,8 +2039,7 @@ void CodeGen::genLclHeap(GenTree* tree) // Therefore we need to subtract 16 from regcnt here. 
assert(genIsValidIntReg(regCnt)); inst_RV_IV(INS_subs, regCnt, 16, emitActualTypeSize(type)); - emitJumpKind jmpNotEqual = genJumpKindForOper(GT_NE, CK_SIGNED); - inst_JMP(jmpNotEqual, loop); + inst_JMP(EJ_ne, loop); } else { @@ -2099,8 +2097,7 @@ void CodeGen::genLclHeap(GenTree* tree) getEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, regTmp, REG_SPBASE, compiler->eeGetPageSize()); getEmitter()->emitIns_R_R(INS_cmp, EA_PTRSIZE, regTmp, regCnt); - emitJumpKind jmpLTU = genJumpKindForOper(GT_LT, CK_UNSIGNED); - inst_JMP(jmpLTU, done); + inst_JMP(EJ_lo, done); // Update SP to be at the next page of stack that we will tickle getEmitter()->emitIns_R_R(INS_mov, EA_PTRSIZE, REG_SPBASE, regTmp); @@ -2246,8 +2243,7 @@ void CodeGen::genCodeForDivMod(GenTreeOp* tree) { // Check if the divisor is zero throw a DivideByZeroException emit->emitIns_R_I(INS_cmp, size, divisorReg, 0); - emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED); - genJumpToThrowHlpBlk(jmpEqual, SCK_DIV_BY_ZERO); + genJumpToThrowHlpBlk(EJ_eq, SCK_DIV_BY_ZERO); } if (checkDividend) @@ -2255,8 +2251,7 @@ void CodeGen::genCodeForDivMod(GenTreeOp* tree) // Check if the divisor is not -1 branch to 'sdivLabel' emit->emitIns_R_I(INS_cmp, size, divisorReg, -1); - emitJumpKind jmpNotEqual = genJumpKindForOper(GT_NE, CK_SIGNED); - inst_JMP(jmpNotEqual, sdivLabel); + inst_JMP(EJ_ne, sdivLabel); // If control flow continues past here the 'divisorReg' is known to be -1 regNumber dividendReg = tree->gtGetOp1()->gtRegNum; @@ -2266,7 +2261,7 @@ void CodeGen::genCodeForDivMod(GenTreeOp* tree) // this will set both the Z and V flags only when dividendReg is MinInt // emit->emitIns_R_R_R(INS_adds, size, REG_ZR, dividendReg, dividendReg); - inst_JMP(jmpNotEqual, sdivLabel); // goto sdiv if the Z flag is clear + inst_JMP(EJ_ne, sdivLabel); // goto sdiv if the Z flag is clear genJumpToThrowHlpBlk(EJ_vs, SCK_ARITH_EXCPN); // if the V flags is set throw // ArithmeticException @@ -2287,8 +2282,7 @@ void 
CodeGen::genCodeForDivMod(GenTreeOp* tree) // divisorOp is not a constant, so it could be zero // emit->emitIns_R_I(INS_cmp, size, divisorReg, 0); - emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED); - genJumpToThrowHlpBlk(jmpEqual, SCK_DIV_BY_ZERO); + genJumpToThrowHlpBlk(EJ_eq, SCK_DIV_BY_ZERO); } genCodeForBinary(tree); } @@ -2998,8 +2992,7 @@ void CodeGen::genCodeForReturnTrap(GenTreeOp* tree) BasicBlock* skipLabel = genCreateTempLabel(); - emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED); - inst_JMP(jmpEqual, skipLabel); + inst_JMP(EJ_eq, skipLabel); // emit the call to the EE-helper that stops for GC (or other reasons) genEmitHelperCall(CORINFO_HELP_STOP_FOR_GC, 0, EA_UNKNOWN); @@ -3175,63 +3168,6 @@ void CodeGen::genCodeForSwap(GenTreeOp* tree) gcInfo.gcMarkRegPtrVal(oldOp1Reg, type2); } -//------------------------------------------------------------------------------------------- -// genSetRegToCond: Set a register 'dstReg' to the appropriate one or zero value -// corresponding to a binary Relational operator result. -// -// Arguments: -// dstReg - The target register to set to 1 or 0 -// tree - The GenTree Relop node that was used to set the Condition codes -// -// Return Value: none -// -// Notes: -// A full 64-bit value of either 1 or 0 is setup in the 'dstReg' -//------------------------------------------------------------------------------------------- - -void CodeGen::genSetRegToCond(regNumber dstReg, GenTree* tree) -{ - emitJumpKind jumpKind[2]; - bool branchToTrueLabel[2]; - genJumpKindsForTree(tree, jumpKind, branchToTrueLabel); - assert(jumpKind[0] != EJ_NONE); - - // Set the reg according to the flags - inst_SET(jumpKind[0], dstReg); - - // Do we need to use two operation to set the flags? 
- // - if (jumpKind[1] != EJ_NONE) - { - emitter* emit = getEmitter(); - bool ordered = ((tree->gtFlags & GTF_RELOP_NAN_UN) == 0); - insCond secondCond; - - // The only ones that require two operations are the - // floating point compare operations of BEQ or BNE.UN - // - if (tree->gtOper == GT_EQ) - { - // This must be an ordered comparison. - assert(ordered); - assert(jumpKind[1] == EJ_vs); // We complement this value - secondCond = INS_COND_VC; // for the secondCond - } - else // gtOper == GT_NE - { - // This must be BNE.UN (unordered comparison) - assert((tree->gtOper == GT_NE) && !ordered); - assert(jumpKind[1] == EJ_lo); // We complement this value - secondCond = INS_COND_HS; // for the secondCond - } - - // The second instruction is a 'csinc' instruction that either selects the previous dstReg - // or increments the ZR register, which produces a 1 result. - - emit->emitIns_R_R_R_COND(INS_csinc, EA_8BYTE, dstReg, dstReg, REG_ZR, secondCond); - } -} - //------------------------------------------------------------------------ // genIntToFloatCast: Generate code to cast an int/long to float/double // @@ -3424,8 +3360,7 @@ void CodeGen::genCkfinite(GenTree* treeNode) emit->emitIns_R_I(INS_cmp, EA_4BYTE, intReg, expMask); // If exponent is all 1's, throw ArithmeticException - emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED); - genJumpToThrowHlpBlk(jmpEqual, SCK_ARITH_EXCPN); + genJumpToThrowHlpBlk(EJ_eq, SCK_ARITH_EXCPN); // if it is a finite value copy it to targetReg if (treeNode->gtRegNum != fpReg) @@ -3499,7 +3434,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) // Are we evaluating this into a register? 
if (targetReg != REG_NA) { - genSetRegToCond(targetReg, tree); + inst_SETCC(GenCondition::FromRelop(tree), tree->TypeGet(), targetReg); genProduceReg(tree); } } @@ -5192,8 +5127,7 @@ void CodeGen::genHWIntrinsicSwitchTable(regNumber swReg, // Detect and throw out of range exception getEmitter()->emitIns_R_I(INS_cmp, EA_4BYTE, swReg, swMax); - emitJumpKind jmpGEU = genJumpKindForOper(GT_GE, CK_UNSIGNED); - genJumpToThrowHlpBlk(jmpGEU, SCK_ARG_RNG_EXCPN); + genJumpToThrowHlpBlk(EJ_hs, SCK_ARG_RNG_EXCPN); // Calculate switch target labelFirst->bbFlags |= BBF_JMP_TARGET; diff --git a/src/jit/codegenarmarch.cpp b/src/jit/codegenarmarch.cpp index 65a7ad7b65..9d2fff8b7a 100644 --- a/src/jit/codegenarmarch.cpp +++ b/src/jit/codegenarmarch.cpp @@ -282,7 +282,7 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) break; case GT_JTRUE: - genCodeForJumpTrue(treeNode); + genCodeForJumpTrue(treeNode->AsOp()); break; #ifdef _TARGET_ARM64_ @@ -1331,13 +1331,13 @@ void CodeGen::genRangeCheck(GenTree* oper) // constant operand in the second position src1 = arrLen; src2 = arrIndex; - jmpKind = genJumpKindForOper(GT_LE, CK_UNSIGNED); + jmpKind = EJ_ls; } else { src1 = arrIndex; src2 = arrLen; - jmpKind = genJumpKindForOper(GT_GE, CK_UNSIGNED); + jmpKind = EJ_hs; } var_types bndsChkType = genActualType(src2->TypeGet()); @@ -1480,8 +1480,7 @@ void CodeGen::genCodeForArrIndex(GenTreeArrIndex* arrIndex) emit->emitIns_R_R_I(ins_Load(TYP_INT), EA_PTRSIZE, tmpReg, arrReg, offset); // a 4 BYTE sign extending load emit->emitIns_R_R(INS_cmp, EA_4BYTE, tgtReg, tmpReg); - emitJumpKind jmpGEU = genJumpKindForOper(GT_GE, CK_UNSIGNED); - genJumpToThrowHlpBlk(jmpGEU, SCK_RNGCHK_FAIL); + genJumpToThrowHlpBlk(EJ_hs, SCK_RNGCHK_FAIL); genProduceReg(arrIndex); } @@ -1688,7 +1687,7 @@ void CodeGen::genCodeForIndexAddr(GenTreeIndexAddr* node) // Generate the range check. 
getEmitter()->emitInsBinary(INS_cmp, emitActualTypeSize(TYP_I_IMPL), index, &arrLen); - genJumpToThrowHlpBlk(genJumpKindForOper(GT_GE, CK_UNSIGNED), SCK_RNGCHK_FAIL, node->gtIndRngFailBB); + genJumpToThrowHlpBlk(EJ_hs, SCK_RNGCHK_FAIL, node->gtIndRngFailBB); } // Can we use a ScaledAdd instruction? @@ -3156,204 +3155,72 @@ void CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, compiler->compInfoBlkSize = 0; // not exposed by the GCEncoder interface } -//------------------------------------------------------------------------------------------- -// genJumpKindsForTree: Determine the number and kinds of conditional branches -// necessary to implement the given GT_CMP node -// -// Arguments: -// cmpTree - (input) The GenTree node that is used to set the Condition codes -// - The GenTree Relop node that was used to set the Condition codes -// jmpKind[2] - (output) One or two conditional branch instructions -// jmpToTrueLabel[2] - (output) On Arm64 both branches will always branch to the true label -// -// Return Value: -// Sets the proper values into the array elements of jmpKind[] and jmpToTrueLabel[] -// -// Assumptions: -// At least one conditional branch instruction will be returned. -// Typically only one conditional branch is needed -// and the second jmpKind[] value is set to EJ_NONE -// -void CodeGen::genJumpKindsForTree(GenTree* cmpTree, emitJumpKind jmpKind[2], bool jmpToTrueLabel[2]) +// clang-format off +const CodeGen::GenConditionDesc CodeGen::GenConditionDesc::map[32] { - // On ARM both branches will always branch to the true label - jmpToTrueLabel[0] = true; - jmpToTrueLabel[1] = true; - - // For integer comparisons just use genJumpKindForOper - if (!varTypeIsFloating(cmpTree->gtOp.gtOp1)) - { - CompareKind compareKind = ((cmpTree->gtFlags & GTF_UNSIGNED) != 0) ? 
CK_UNSIGNED : CK_SIGNED; - jmpKind[0] = genJumpKindForOper(cmpTree->gtOper, compareKind); - jmpKind[1] = EJ_NONE; - } - else // We have a Floating Point Compare operation - { - assert(cmpTree->OperIsCompare()); - - // For details on this mapping, see the ARM Condition Code table - // at section A8.3 in the ARMv7 architecture manual or - // at section C1.2.3 in the ARMV8 architecture manual. - - // We must check the GTF_RELOP_NAN_UN to find out - // if we need to branch when we have a NaN operand. - // - if ((cmpTree->gtFlags & GTF_RELOP_NAN_UN) != 0) - { - // Must branch if we have an NaN, unordered - switch (cmpTree->gtOper) - { - case GT_EQ: - jmpKind[0] = EJ_eq; // branch or set when equal (and no NaN's) - jmpKind[1] = EJ_vs; // branch or set when we have a NaN - break; - - case GT_NE: - jmpKind[0] = EJ_ne; // branch or set when not equal (or have NaN's) - jmpKind[1] = EJ_NONE; - break; - - case GT_LT: - jmpKind[0] = EJ_lt; // branch or set when less than (or have NaN's) - jmpKind[1] = EJ_NONE; - break; - - case GT_LE: - jmpKind[0] = EJ_le; // branch or set when less than or equal (or have NaN's) - jmpKind[1] = EJ_NONE; - break; - - case GT_GT: - jmpKind[0] = EJ_hi; // branch or set when greater than (or have NaN's) - jmpKind[1] = EJ_NONE; - break; - - case GT_GE: - jmpKind[0] = EJ_hs; // branch or set when greater than or equal (or have NaN's) - jmpKind[1] = EJ_NONE; - break; - - default: - unreached(); - } - } - else // ((cmpTree->gtFlags & GTF_RELOP_NAN_UN) == 0) - { - // Do not branch if we have an NaN, unordered - switch (cmpTree->gtOper) - { - case GT_EQ: - jmpKind[0] = EJ_eq; // branch or set when equal (and no NaN's) - jmpKind[1] = EJ_NONE; - break; - - case GT_NE: - jmpKind[0] = EJ_gt; // branch or set when greater than (and no NaN's) - jmpKind[1] = EJ_lo; // branch or set when less than (and no NaN's) - break; - - case GT_LT: - jmpKind[0] = EJ_lo; // branch or set when less than (and no NaN's) - jmpKind[1] = EJ_NONE; - break; - - case GT_LE: - 
jmpKind[0] = EJ_ls; // branch or set when less than or equal (and no NaN's) - jmpKind[1] = EJ_NONE; - break; - - case GT_GT: - jmpKind[0] = EJ_gt; // branch or set when greater than (and no NaN's) - jmpKind[1] = EJ_NONE; - break; - - case GT_GE: - jmpKind[0] = EJ_ge; // branch or set when greater than or equal (and no NaN's) - jmpKind[1] = EJ_NONE; - break; - - default: - unreached(); - } - } - } -} + { }, // NONE + { }, // 1 + { EJ_lt }, // SLT + { EJ_le }, // SLE + { EJ_ge }, // SGE + { EJ_gt }, // SGT + { EJ_mi }, // S + { EJ_pl }, // NS + + { EJ_eq }, // EQ + { EJ_ne }, // NE + { EJ_lo }, // ULT + { EJ_ls }, // ULE + { EJ_hs }, // UGE + { EJ_hi }, // UGT + { EJ_hs }, // C + { EJ_lo }, // NC + + { EJ_eq }, // FEQ + { EJ_gt, GT_AND, EJ_lo }, // FNE + { EJ_lo }, // FLT + { EJ_ls }, // FLE + { EJ_ge }, // FGE + { EJ_gt }, // FGT + { EJ_vs }, // O + { EJ_vc }, // NO + + { EJ_eq, GT_OR, EJ_vs }, // FEQU + { EJ_ne }, // FNEU + { EJ_lt }, // FLTU + { EJ_le }, // FLEU + { EJ_hs }, // FGEU + { EJ_hi }, // FGTU + { }, // P + { }, // NP +}; +// clang-format on //------------------------------------------------------------------------ -// genCodeForJumpTrue: Generates code for jmpTrue statement. +// inst_SETCC: Generate code to set a register to 0 or 1 based on a condition. // // Arguments: -// tree - The GT_JTRUE tree node. +// condition - The condition +// type - The type of the value to be produced +// dstReg - The destination register to be set to 1 or 0 // -// Return Value: -// None -// -void CodeGen::genCodeForJumpTrue(GenTree* tree) +void CodeGen::inst_SETCC(GenCondition condition, var_types type, regNumber dstReg) { - GenTree* cmp = tree->gtOp.gtOp1; - assert(cmp->OperIsCompare()); - assert(compiler->compCurBB->bbJumpKind == BBJ_COND); + assert(varTypeIsIntegral(type)); + assert(genIsValidIntReg(dstReg)); - // Get the "kind" and type of the comparison. 
Note that whether it is an unsigned cmp - // is governed by a flag NOT by the inherent type of the node - emitJumpKind jumpKind[2]; - bool branchToTrueLabel[2]; - genJumpKindsForTree(cmp, jumpKind, branchToTrueLabel); - assert(jumpKind[0] != EJ_NONE); +#ifdef _TARGET_ARM64_ + const GenConditionDesc& desc = GenConditionDesc::Get(condition); - // On ARM the branches will always branch to the true label - assert(branchToTrueLabel[0]); - inst_JMP(jumpKind[0], compiler->compCurBB->bbJumpDest); + inst_SET(desc.jumpKind1, dstReg); - if (jumpKind[1] != EJ_NONE) + if (desc.oper != GT_NONE) { - // the second conditional branch always has to be to the true label - assert(branchToTrueLabel[1]); - inst_JMP(jumpKind[1], compiler->compCurBB->bbJumpDest); + BasicBlock* labelNext = genCreateTempLabel(); + inst_JMP((desc.oper == GT_OR) ? desc.jumpKind1 : emitter::emitReverseJumpKind(desc.jumpKind1), labelNext); + inst_SET(desc.jumpKind2, dstReg); + genDefineTempLabel(labelNext); } -} - -//------------------------------------------------------------------------ -// genCodeForJcc: Produce code for a GT_JCC node. -// -// Arguments: -// tree - the node -// -void CodeGen::genCodeForJcc(GenTreeCC* tree) -{ - assert(compiler->compCurBB->bbJumpKind == BBJ_COND); - - CompareKind compareKind = ((tree->gtFlags & GTF_UNSIGNED) != 0) ? CK_UNSIGNED : CK_SIGNED; - emitJumpKind jumpKind = genJumpKindForOper(tree->gtCondition, compareKind); - - inst_JMP(jumpKind, compiler->compCurBB->bbJumpDest); -} - -//------------------------------------------------------------------------ -// genCodeForSetcc: Generates code for a GT_SETCC node. -// -// Arguments: -// setcc - the GT_SETCC node -// -// Assumptions: -// The condition represents an integer comparison. This code doesn't -// have the necessary logic to deal with floating point comparisons, -// in fact it doesn't even know if the comparison is integer or floating -// point because SETCC nodes do not have any operands. 
-// - -void CodeGen::genCodeForSetcc(GenTreeCC* setcc) -{ - regNumber dstReg = setcc->gtRegNum; - CompareKind compareKind = setcc->IsUnsigned() ? CK_UNSIGNED : CK_SIGNED; - emitJumpKind jumpKind = genJumpKindForOper(setcc->gtCondition, compareKind); - - assert(genIsValidIntReg(dstReg)); - // Make sure nobody is setting GTF_RELOP_NAN_UN on this node as it is ignored. - assert((setcc->gtFlags & GTF_RELOP_NAN_UN) == 0); - -#ifdef _TARGET_ARM64_ - inst_SET(jumpKind, dstReg); #else // Emit code like that: // ... @@ -3366,19 +3233,17 @@ void CodeGen::genCodeForSetcc(GenTreeCC* setcc) // ... BasicBlock* labelTrue = genCreateTempLabel(); - getEmitter()->emitIns_J(emitter::emitJumpKindToIns(jumpKind), labelTrue); + inst_JCC(condition, labelTrue); - getEmitter()->emitIns_R_I(INS_mov, emitActualTypeSize(setcc->TypeGet()), dstReg, 0); + getEmitter()->emitIns_R_I(INS_mov, emitActualTypeSize(type), dstReg, 0); BasicBlock* labelNext = genCreateTempLabel(); getEmitter()->emitIns_J(INS_b, labelNext); genDefineTempLabel(labelTrue); - getEmitter()->emitIns_R_I(INS_mov, emitActualTypeSize(setcc->TypeGet()), dstReg, 1); + getEmitter()->emitIns_R_I(INS_mov, emitActualTypeSize(type), dstReg, 1); genDefineTempLabel(labelNext); #endif - - genProduceReg(setcc); } //------------------------------------------------------------------------ diff --git a/src/jit/codegencommon.cpp b/src/jit/codegencommon.cpp index 99dc2904ca..1da1012482 100644 --- a/src/jit/codegencommon.cpp +++ b/src/jit/codegencommon.cpp @@ -1708,157 +1708,6 @@ FOUND_AM: return true; } -/***************************************************************************** -* The condition to use for (the jmp/set for) the given type of operation -* -* In case of amd64, this routine should be used when there is no gentree available -* and one needs to generate jumps based on integer comparisons. When gentree is -* available always use its overloaded version. 
-* -*/ - -// static -emitJumpKind CodeGen::genJumpKindForOper(genTreeOps cmp, CompareKind compareKind) -{ - const static BYTE genJCCinsSigned[] = { -#if defined(_TARGET_XARCH_) - EJ_je, // GT_EQ - EJ_jne, // GT_NE - EJ_jl, // GT_LT - EJ_jle, // GT_LE - EJ_jge, // GT_GE - EJ_jg, // GT_GT - EJ_je, // GT_TEST_EQ - EJ_jne, // GT_TEST_NE -#elif defined(_TARGET_ARMARCH_) - EJ_eq, // GT_EQ - EJ_ne, // GT_NE - EJ_lt, // GT_LT - EJ_le, // GT_LE - EJ_ge, // GT_GE - EJ_gt, // GT_GT -#if defined(_TARGET_ARM64_) - EJ_eq, // GT_TEST_EQ - EJ_ne, // GT_TEST_NE -#endif -#endif - }; - - const static BYTE genJCCinsUnsigned[] = /* unsigned comparison */ - { -#if defined(_TARGET_XARCH_) - EJ_je, // GT_EQ - EJ_jne, // GT_NE - EJ_jb, // GT_LT - EJ_jbe, // GT_LE - EJ_jae, // GT_GE - EJ_ja, // GT_GT - EJ_je, // GT_TEST_EQ - EJ_jne, // GT_TEST_NE -#elif defined(_TARGET_ARMARCH_) - EJ_eq, // GT_EQ - EJ_ne, // GT_NE - EJ_lo, // GT_LT - EJ_ls, // GT_LE - EJ_hs, // GT_GE - EJ_hi, // GT_GT -#if defined(_TARGET_ARM64_) - EJ_eq, // GT_TEST_EQ - EJ_ne, // GT_TEST_NE -#endif -#endif - }; - - const static BYTE genJCCinsLogical[] = /* logical operation */ - { -#if defined(_TARGET_XARCH_) - EJ_je, // GT_EQ (Z == 1) - EJ_jne, // GT_NE (Z == 0) - EJ_js, // GT_LT (S == 1) - EJ_NONE, // GT_LE - EJ_jns, // GT_GE (S == 0) - EJ_NONE, // GT_GT - EJ_NONE, // GT_TEST_EQ - EJ_NONE, // GT_TEST_NE -#elif defined(_TARGET_ARMARCH_) - EJ_eq, // GT_EQ (Z == 1) - EJ_ne, // GT_NE (Z == 0) - EJ_mi, // GT_LT (N == 1) - EJ_NONE, // GT_LE - EJ_pl, // GT_GE (N == 0) - EJ_NONE, // GT_GT -#if defined(_TARGET_ARM64_) - EJ_eq, // GT_TEST_EQ - EJ_ne, // GT_TEST_NE -#endif -#endif - }; - -#if defined(_TARGET_XARCH_) - assert(genJCCinsSigned[GT_EQ - GT_EQ] == EJ_je); - assert(genJCCinsSigned[GT_NE - GT_EQ] == EJ_jne); - assert(genJCCinsSigned[GT_LT - GT_EQ] == EJ_jl); - assert(genJCCinsSigned[GT_LE - GT_EQ] == EJ_jle); - assert(genJCCinsSigned[GT_GE - GT_EQ] == EJ_jge); - assert(genJCCinsSigned[GT_GT - GT_EQ] == EJ_jg); - 
assert(genJCCinsSigned[GT_TEST_EQ - GT_EQ] == EJ_je); - assert(genJCCinsSigned[GT_TEST_NE - GT_EQ] == EJ_jne); - - assert(genJCCinsUnsigned[GT_EQ - GT_EQ] == EJ_je); - assert(genJCCinsUnsigned[GT_NE - GT_EQ] == EJ_jne); - assert(genJCCinsUnsigned[GT_LT - GT_EQ] == EJ_jb); - assert(genJCCinsUnsigned[GT_LE - GT_EQ] == EJ_jbe); - assert(genJCCinsUnsigned[GT_GE - GT_EQ] == EJ_jae); - assert(genJCCinsUnsigned[GT_GT - GT_EQ] == EJ_ja); - assert(genJCCinsUnsigned[GT_TEST_EQ - GT_EQ] == EJ_je); - assert(genJCCinsUnsigned[GT_TEST_NE - GT_EQ] == EJ_jne); - - assert(genJCCinsLogical[GT_EQ - GT_EQ] == EJ_je); - assert(genJCCinsLogical[GT_NE - GT_EQ] == EJ_jne); - assert(genJCCinsLogical[GT_LT - GT_EQ] == EJ_js); - assert(genJCCinsLogical[GT_GE - GT_EQ] == EJ_jns); -#elif defined(_TARGET_ARMARCH_) - assert(genJCCinsSigned[GT_EQ - GT_EQ] == EJ_eq); - assert(genJCCinsSigned[GT_NE - GT_EQ] == EJ_ne); - assert(genJCCinsSigned[GT_LT - GT_EQ] == EJ_lt); - assert(genJCCinsSigned[GT_LE - GT_EQ] == EJ_le); - assert(genJCCinsSigned[GT_GE - GT_EQ] == EJ_ge); - assert(genJCCinsSigned[GT_GT - GT_EQ] == EJ_gt); - - assert(genJCCinsUnsigned[GT_EQ - GT_EQ] == EJ_eq); - assert(genJCCinsUnsigned[GT_NE - GT_EQ] == EJ_ne); - assert(genJCCinsUnsigned[GT_LT - GT_EQ] == EJ_lo); - assert(genJCCinsUnsigned[GT_LE - GT_EQ] == EJ_ls); - assert(genJCCinsUnsigned[GT_GE - GT_EQ] == EJ_hs); - assert(genJCCinsUnsigned[GT_GT - GT_EQ] == EJ_hi); - - assert(genJCCinsLogical[GT_EQ - GT_EQ] == EJ_eq); - assert(genJCCinsLogical[GT_NE - GT_EQ] == EJ_ne); - assert(genJCCinsLogical[GT_LT - GT_EQ] == EJ_mi); - assert(genJCCinsLogical[GT_GE - GT_EQ] == EJ_pl); -#else - assert(!"unknown arch"); -#endif - assert(GenTree::OperIsCompare(cmp)); - - emitJumpKind result = EJ_COUNT; - - if (compareKind == CK_UNSIGNED) - { - result = (emitJumpKind)genJCCinsUnsigned[cmp - GT_EQ]; - } - else if (compareKind == CK_SIGNED) - { - result = (emitJumpKind)genJCCinsSigned[cmp - GT_EQ]; - } - else if (compareKind == CK_LOGICAL) - { - 
result = (emitJumpKind)genJCCinsLogical[cmp - GT_EQ]; - } - assert(result != EJ_COUNT); - return result; -} - #ifdef _TARGET_ARMARCH_ //------------------------------------------------------------------------ // genEmitGSCookieCheck: Generate code to check that the GS cookie @@ -1899,9 +1748,8 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg) // Compare with the GC cookie constant getEmitter()->emitIns_R_R(INS_cmp, EA_PTRSIZE, regGSConst, regGSValue); - BasicBlock* gsCheckBlk = genCreateTempLabel(); - emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED); - inst_JMP(jmpEqual, gsCheckBlk); + BasicBlock* gsCheckBlk = genCreateTempLabel(); + inst_JMP(EJ_eq, gsCheckBlk); // regGSConst and regGSValue aren't needed anymore, we can use them for helper call genEmitHelperCall(CORINFO_HELP_FAIL_FAST, 0, EA_UNKNOWN, regGSConst); genDefineTempLabel(gsCheckBlk); diff --git a/src/jit/codegenlinear.cpp b/src/jit/codegenlinear.cpp index 7da1896589..33108f1904 100644 --- a/src/jit/codegenlinear.cpp +++ b/src/jit/codegenlinear.cpp @@ -2132,3 +2132,81 @@ void CodeGen::genStoreLongLclVar(GenTree* treeNode) } } #endif // !defined(_TARGET_64BIT_) + +//------------------------------------------------------------------------ +// genCodeForJumpTrue: Generate code for a GT_JTRUE node. +// +// Arguments: +// jtrue - The node +// +void CodeGen::genCodeForJumpTrue(GenTreeOp* jtrue) +{ + assert(compiler->compCurBB->bbJumpKind == BBJ_COND); + assert(jtrue->OperIs(GT_JTRUE)); + + GenCondition condition = GenCondition::FromRelop(jtrue->gtGetOp1()); + + if (condition.PreferSwap()) + { + condition = GenCondition::Swap(condition); + } + + inst_JCC(condition, compiler->compCurBB->bbJumpDest); +} + +//------------------------------------------------------------------------ +// genCodeForJcc: Generate code for a GT_JCC node. 
+// +// Arguments: +// jcc - The node +// +void CodeGen::genCodeForJcc(GenTreeCC* jcc) +{ + assert(compiler->compCurBB->bbJumpKind == BBJ_COND); + assert(jcc->OperIs(GT_JCC)); + + inst_JCC(jcc->gtCondition, compiler->compCurBB->bbJumpDest); +} + +//------------------------------------------------------------------------ +// inst_JCC: Generate a conditional branch instruction sequence. +// +// Arguments: +// condition - The branch condition +// target - The basic block to jump to when the condition is true +// +void CodeGen::inst_JCC(GenCondition condition, BasicBlock* target) +{ + const GenConditionDesc& desc = GenConditionDesc::Get(condition); + + if (desc.oper == GT_NONE) + { + inst_JMP(desc.jumpKind1, target); + } + else if (desc.oper == GT_OR) + { + inst_JMP(desc.jumpKind1, target); + inst_JMP(desc.jumpKind2, target); + } + else // if (desc.oper == GT_AND) + { + BasicBlock* labelNext = genCreateTempLabel(); + inst_JMP(emitter::emitReverseJumpKind(desc.jumpKind1), labelNext); + inst_JMP(desc.jumpKind2, target); + genDefineTempLabel(labelNext); + } +} + +//------------------------------------------------------------------------ +// genCodeForSetcc: Generate code for a GT_SETCC node. 
+// +// Arguments: +// setcc - The node +// +void CodeGen::genCodeForSetcc(GenTreeCC* setcc) +{ + assert(setcc->OperIs(GT_SETCC)); + + inst_SETCC(setcc->gtCondition, setcc->TypeGet(), setcc->gtRegNum); + genProduceReg(setcc); +} diff --git a/src/jit/codegenxarch.cpp b/src/jit/codegenxarch.cpp index ec43cdd0f3..9abb25df85 100644 --- a/src/jit/codegenxarch.cpp +++ b/src/jit/codegenxarch.cpp @@ -215,9 +215,8 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg) getEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, regGSCheck, compiler->lvaGSSecurityCookie, 0); } - BasicBlock* gsCheckBlk = genCreateTempLabel(); - emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED); - inst_JMP(jmpEqual, gsCheckBlk); + BasicBlock* gsCheckBlk = genCreateTempLabel(); + inst_JMP(EJ_je, gsCheckBlk); genEmitHelperCall(CORINFO_HELP_FAIL_FAST, 0, EA_UNKNOWN); genDefineTempLabel(gsCheckBlk); @@ -1385,108 +1384,88 @@ void CodeGen::genCodeForBT(GenTreeOp* bt) getEmitter()->emitIns_R_R(INS_bt, emitTypeSize(type), op2->gtRegNum, op1->gtRegNum); } +// clang-format off +const CodeGen::GenConditionDesc CodeGen::GenConditionDesc::map[32] +{ + { }, // NONE + { }, // 1 + { EJ_jl }, // SLT + { EJ_jle }, // SLE + { EJ_jge }, // SGE + { EJ_jg }, // SGT + { EJ_js }, // S + { EJ_jns }, // NS + + { EJ_je }, // EQ + { EJ_jne }, // NE + { EJ_jb }, // ULT + { EJ_jbe }, // ULE + { EJ_jae }, // UGE + { EJ_ja }, // UGT + { EJ_jb }, // C + { EJ_jae }, // NC + + // Floating point compare instructions (UCOMISS, UCOMISD etc.) set the condition flags as follows: + // ZF PF CF Meaning + // --------------------- + // 1 1 1 Unordered + // 0 0 0 Greater + // 0 0 1 Less Than + // 1 0 0 Equal + // + // Since ZF and CF are also set when the result is unordered, in some cases we first need to check + // PF before checking ZF/CF. In general, ordered conditions will result in a jump only if PF is not + // set and unordered conditions will result in a jump only if PF is set. 
+ + { EJ_jnp, GT_AND, EJ_je }, // FEQ + { EJ_jne }, // FNE + { EJ_jnp, GT_AND, EJ_jb }, // FLT + { EJ_jnp, GT_AND, EJ_jbe }, // FLE + { EJ_jae }, // FGE + { EJ_ja }, // FGT + { EJ_jo }, // O + { EJ_jno }, // NO + + { EJ_je }, // FEQU + { EJ_jp, GT_OR, EJ_jne }, // FNEU + { EJ_jb }, // FLTU + { EJ_jbe }, // FLEU + { EJ_jp, GT_OR, EJ_jae }, // FGEU + { EJ_jp, GT_OR, EJ_ja }, // FGTU + { EJ_jp }, // P + { EJ_jnp }, // NP +}; +// clang-format on + //------------------------------------------------------------------------ -// genCodeForJumpTrue: Generates code for jmpTrue statement. +// inst_SETCC: Generate code to set a register to 0 or 1 based on a condition. // // Arguments: -// tree - The GT_JTRUE tree node. -// -// Return Value: -// None +// condition - The condition +// type - The type of the value to be produced +// dstReg - The destination register to be set to 1 or 0 // -void CodeGen::genCodeForJumpTrue(GenTree* tree) +void CodeGen::inst_SETCC(GenCondition condition, var_types type, regNumber dstReg) { - GenTree* cmp = tree->gtOp.gtOp1; + assert(varTypeIsIntegral(type)); + assert(genIsValidIntReg(dstReg) && isByteReg(dstReg)); - assert(cmp->OperIsCompare()); - assert(compiler->compCurBB->bbJumpKind == BBJ_COND); + const GenConditionDesc& desc = GenConditionDesc::Get(condition); -#if !defined(_TARGET_64BIT_) - // Long-typed compares should have been handled by Lowering::LowerCompare. - assert(!varTypeIsLong(cmp->gtGetOp1())); -#endif + inst_SET(desc.jumpKind1, dstReg); - // Get the "kind" and type of the comparison. Note that whether it is an unsigned cmp - // is governed by a flag NOT by the inherent type of the node - // TODO-XArch-CQ: Check if we can use the currently set flags. 
- emitJumpKind jumpKind[2]; - bool branchToTrueLabel[2]; - genJumpKindsForTree(cmp, jumpKind, branchToTrueLabel); - - BasicBlock* skipLabel = nullptr; - if (jumpKind[0] != EJ_NONE) + if (desc.oper != GT_NONE) { - BasicBlock* jmpTarget; - if (branchToTrueLabel[0]) - { - jmpTarget = compiler->compCurBB->bbJumpDest; - } - else - { - // This case arises only for ordered GT_EQ right now - assert((cmp->gtOper == GT_EQ) && ((cmp->gtFlags & GTF_RELOP_NAN_UN) == 0)); - skipLabel = genCreateTempLabel(); - jmpTarget = skipLabel; - } - - inst_JMP(jumpKind[0], jmpTarget); + BasicBlock* labelNext = genCreateTempLabel(); + inst_JMP((desc.oper == GT_OR) ? desc.jumpKind1 : emitter::emitReverseJumpKind(desc.jumpKind1), labelNext); + inst_SET(desc.jumpKind2, dstReg); + genDefineTempLabel(labelNext); } - if (jumpKind[1] != EJ_NONE) + if (!varTypeIsByte(type)) { - // the second conditional branch always has to be to the true label - assert(branchToTrueLabel[1]); - inst_JMP(jumpKind[1], compiler->compCurBB->bbJumpDest); + getEmitter()->emitIns_R_R(INS_movzx, EA_1BYTE, dstReg, dstReg); } - - if (skipLabel != nullptr) - { - genDefineTempLabel(skipLabel); - } -} - -//------------------------------------------------------------------------ -// genCodeForJcc: Produce code for a GT_JCC node. -// -// Arguments: -// tree - the node -// -void CodeGen::genCodeForJcc(GenTreeCC* tree) -{ - assert(compiler->compCurBB->bbJumpKind == BBJ_COND); - - CompareKind compareKind = ((tree->gtFlags & GTF_UNSIGNED) != 0) ? CK_UNSIGNED : CK_SIGNED; - emitJumpKind jumpKind = genJumpKindForOper(tree->gtCondition, compareKind); - - inst_JMP(jumpKind, compiler->compCurBB->bbJumpDest); -} - -//------------------------------------------------------------------------ -// genCodeForSetcc: Generates a setcc instruction for a GT_SETCC node. -// -// Arguments: -// tree - the GT_SETCC node -// -// Assumptions: -// The condition represents an integer comparison. 
This code doesn't -// have the necessary logic to deal with floating point comparisons, -// in fact it doesn't even know if the comparison is integer or floating -// point because SETCC nodes do not have any operands. -// - -void CodeGen::genCodeForSetcc(GenTreeCC* setcc) -{ - regNumber dstReg = setcc->gtRegNum; - CompareKind compareKind = setcc->IsUnsigned() ? CK_UNSIGNED : CK_SIGNED; - emitJumpKind jumpKind = genJumpKindForOper(setcc->gtCondition, compareKind); - - assert(genIsValidIntReg(dstReg) && isByteReg(dstReg)); - // Make sure nobody is setting GTF_RELOP_NAN_UN on this node as it is ignored. - assert((setcc->gtFlags & GTF_RELOP_NAN_UN) == 0); - - inst_SET(jumpKind, dstReg); - inst_RV_RV(ins_Move_Extend(TYP_UBYTE, true), dstReg, dstReg, TYP_UBYTE, emitTypeSize(TYP_UBYTE)); - genProduceReg(setcc); } //------------------------------------------------------------------------ @@ -1510,8 +1489,7 @@ void CodeGen::genCodeForReturnTrap(GenTreeOp* tree) BasicBlock* skipLabel = genCreateTempLabel(); - emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED); - inst_JMP(jmpEqual, skipLabel); + inst_JMP(EJ_je, skipLabel); // emit the call to the EE-helper that stops for GC (or other reasons) regNumber tmpReg = tree->GetSingleTempReg(RBM_ALLINT); @@ -1796,7 +1774,7 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) break; case GT_JTRUE: - genCodeForJumpTrue(treeNode); + genCodeForJumpTrue(treeNode->AsOp()); break; case GT_JCC: @@ -6101,184 +6079,12 @@ void CodeGen::genLeaInstruction(GenTreeAddrMode* lea) genProduceReg(lea); } -//------------------------------------------------------------------------------------------- -// genJumpKindsForTree: Determine the number and kinds of conditional branches -// necessary to implement the given GT_CMP node -// -// Arguments: -// cmpTree - (input) The GenTree node that is used to set the Condition codes -// - The GenTree Relop node that was used to set the Condition codes -// jmpKind[2] - (output) One or two conditional 
branch instructions -// jmpToTrueLabel[2] - (output) When true we branch to the true case -// When false we create a second label and branch to the false case -// Only GT_EQ for a floating point compares can have a false value. -// -// Return Value: -// Sets the proper values into the array elements of jmpKind[] and jmpToTrueLabel[] -// -// Assumptions: -// At least one conditional branch instruction will be returned. -// Typically only one conditional branch is needed -// and the second jmpKind[] value is set to EJ_NONE -// -// Notes: -// jmpToTrueLabel[i]= true implies branch when the compare operation is true. -// jmpToTrueLabel[i]= false implies branch when the compare operation is false. -//------------------------------------------------------------------------------------------- - -// static -void CodeGen::genJumpKindsForTree(GenTree* cmpTree, emitJumpKind jmpKind[2], bool jmpToTrueLabel[2]) -{ - // Except for BEQ (= ordered GT_EQ) both jumps are to the true label. - jmpToTrueLabel[0] = true; - jmpToTrueLabel[1] = true; - - // For integer comparisons just use genJumpKindForOper - if (!varTypeIsFloating(cmpTree->gtOp.gtOp1)) - { - CompareKind compareKind = ((cmpTree->gtFlags & GTF_UNSIGNED) != 0) ? CK_UNSIGNED : CK_SIGNED; - jmpKind[0] = genJumpKindForOper(cmpTree->gtOper, compareKind); - jmpKind[1] = EJ_NONE; - } - else - { - assert(cmpTree->OperIsCompare()); - - // For details on how we arrived at this mapping, see the comment block in genCodeForTreeNode() - // while generating code for compare opererators (e.g. GT_EQ etc). 
- if ((cmpTree->gtFlags & GTF_RELOP_NAN_UN) != 0) - { - // Must branch if we have an NaN, unordered - switch (cmpTree->gtOper) - { - case GT_LT: - case GT_GT: - jmpKind[0] = EJ_jb; - jmpKind[1] = EJ_NONE; - break; - - case GT_LE: - case GT_GE: - jmpKind[0] = EJ_jbe; - jmpKind[1] = EJ_NONE; - break; - - case GT_NE: - jmpKind[0] = EJ_jpe; - jmpKind[1] = EJ_jne; - break; - - case GT_EQ: - jmpKind[0] = EJ_je; - jmpKind[1] = EJ_NONE; - break; - - default: - unreached(); - } - } - else // ((cmpTree->gtFlags & GTF_RELOP_NAN_UN) == 0) - { - // Do not branch if we have an NaN, unordered - switch (cmpTree->gtOper) - { - case GT_LT: - case GT_GT: - jmpKind[0] = EJ_ja; - jmpKind[1] = EJ_NONE; - break; - - case GT_LE: - case GT_GE: - jmpKind[0] = EJ_jae; - jmpKind[1] = EJ_NONE; - break; - - case GT_NE: - jmpKind[0] = EJ_jne; - jmpKind[1] = EJ_NONE; - break; - - case GT_EQ: - jmpKind[0] = EJ_jpe; - jmpKind[1] = EJ_je; - jmpToTrueLabel[0] = false; - break; - - default: - unreached(); - } - } - } -} - //------------------------------------------------------------------------ // genCompareFloat: Generate code for comparing two floating point values // // Arguments: // treeNode - the compare tree // -// Return Value: -// None. -// Comments: -// SSE2 instruction ucomis[s|d] is performs unordered comparison and -// updates rFLAGS register as follows. -// Result of compare ZF PF CF -// ----------------- ------------ -// Unordered 1 1 1 <-- this result implies one of operands of compare is a NAN. -// Greater 0 0 0 -// Less Than 0 0 1 -// Equal 1 0 0 -// -// From the above table the following equalities follow. As per ECMA spec *.UN opcodes perform -// unordered comparison of floating point values. That is *.UN comparisons result in true when -// one of the operands is a NaN whereas ordered comparisons results in false. 
-// -// Opcode Amd64 equivalent Comment -// ------ ----------------- -------- -// BLT.UN(a,b) ucomis[s|d] a, b Jb branches if CF=1, which means either a<b or unordered from the above -// jb table -// -// BLT(a,b) ucomis[s|d] b, a Ja branches if CF=0 and ZF=0, which means b>a that in turn implies a<b -// ja -// -// BGT.UN(a,b) ucomis[s|d] b, a branch if b<a or unordered ==> branch if a>b or unordered -// jb -// -// BGT(a, b) ucomis[s|d] a, b branch if a>b -// ja -// -// BLE.UN(a,b) ucomis[s|d] a, b jbe branches if CF=1 or ZF=1, which implies a<=b or unordered -// jbe -// -// BLE(a,b) ucomis[s|d] b, a jae branches if CF=0, which mean b>=a or a<=b -// jae -// -// BGE.UN(a,b) ucomis[s|d] b, a branch if b<=a or unordered ==> branch if a>=b or unordered -// jbe -// -// BGE(a,b) ucomis[s|d] a, b branch if a>=b -// jae -// -// BEQ.UN(a,b) ucomis[s|d] a, b branch if a==b or unordered. There is no BEQ.UN opcode in ECMA spec. -// je This case is given for completeness, in case if JIT generates such -// a gentree internally. -// -// BEQ(a,b) ucomis[s|d] a, b From the above table, PF=0 and ZF=1 corresponds to a==b. -// jpe L1 -// je <true label> -// L1: -// -// BNE(a,b) ucomis[s|d] a, b branch if a!=b. There is no BNE opcode in ECMA spec. This case is -// jne given for completeness, in case if JIT generates such a gentree -// internally. -// -// BNE.UN(a,b) ucomis[s|d] a, b From the above table, PF=1 or ZF=0 implies unordered or a!=b -// jpe <true label> -// jne <true label> -// -// As we can see from the above equalities that the operands of a compare operator need to be -// reversed in case of BLT/CLT, BGT.UN/CGT.UN, BLE/CLE, BGE.UN/CGE.UN. 
void CodeGen::genCompareFloat(GenTree* treeNode) { assert(treeNode->OperIsCompare()); @@ -6298,22 +6104,12 @@ void CodeGen::genCompareFloat(GenTree* treeNode) instruction ins; emitAttr cmpAttr; - bool reverseOps; - if ((tree->gtFlags & GTF_RELOP_NAN_UN) != 0) - { - // Unordered comparison case - reverseOps = (tree->gtOper == GT_GT || tree->gtOper == GT_GE); - } - else - { - reverseOps = (tree->gtOper == GT_LT || tree->gtOper == GT_LE); - } + GenCondition condition = GenCondition::FromFloatRelop(treeNode); - if (reverseOps) + if (condition.PreferSwap()) { - GenTree* tmp = op1; - op1 = op2; - op2 = tmp; + condition = GenCondition::Swap(condition); + std::swap(op1, op2); } ins = ins_FloatCompare(op1Type); @@ -6324,7 +6120,7 @@ void CodeGen::genCompareFloat(GenTree* treeNode) // Are we evaluating this into a register? if (targetReg != REG_NA) { - genSetRegToCond(targetReg, tree); + inst_SETCC(condition, treeNode->TypeGet(), targetReg); genProduceReg(tree); } } @@ -6433,94 +6229,11 @@ void CodeGen::genCompareInt(GenTree* treeNode) // Are we evaluating this into a register? if (targetReg != REG_NA) { - genSetRegToCond(targetReg, tree); + inst_SETCC(GenCondition::FromIntegralRelop(tree), tree->TypeGet(), targetReg); genProduceReg(tree); } } -//------------------------------------------------------------------------------------------- -// genSetRegToCond: Set a register 'dstReg' to the appropriate one or zero value -// corresponding to a binary Relational operator result. 
-// -// Arguments: -// dstReg - The target register to set to 1 or 0 -// tree - The GenTree Relop node that was used to set the Condition codes -// -// Return Value: none -// -// Notes: -// A full 64-bit value of either 1 or 0 is setup in the 'dstReg' -//------------------------------------------------------------------------------------------- - -void CodeGen::genSetRegToCond(regNumber dstReg, GenTree* tree) -{ - noway_assert((genRegMask(dstReg) & RBM_BYTE_REGS) != 0); - - emitJumpKind jumpKind[2]; - bool branchToTrueLabel[2]; - genJumpKindsForTree(tree, jumpKind, branchToTrueLabel); - - if (jumpKind[1] == EJ_NONE) - { - // Set (lower byte of) reg according to the flags - inst_SET(jumpKind[0], dstReg); - } - else - { -#ifdef DEBUG - // jmpKind[1] != EJ_NONE implies BEQ and BEN.UN of floating point values. - // These are represented by two conditions. - if (tree->gtOper == GT_EQ) - { - // This must be an ordered comparison. - assert((tree->gtFlags & GTF_RELOP_NAN_UN) == 0); - } - else - { - // This must be BNE.UN - assert((tree->gtOper == GT_NE) && ((tree->gtFlags & GTF_RELOP_NAN_UN) != 0)); - } -#endif - - // Here is the sample code generated in each case: - // BEQ == cmp, jpe <false label>, je <true label> - // That is, to materialize comparison reg needs to be set if PF=0 and ZF=1 - // setnp reg // if (PF==0) reg = 1 else reg = 0 - // jpe L1 // Jmp if PF==1 - // sete reg - // L1: - // - // BNE.UN == cmp, jpe <true label>, jne <true label> - // That is, to materialize the comparison reg needs to be set if either PF=1 or ZF=0; - // setp reg - // jpe L1 - // setne reg - // L1: - - // reverse the jmpkind condition before setting dstReg if it is to false label. - inst_SET(branchToTrueLabel[0] ? 
jumpKind[0] : emitter::emitReverseJumpKind(jumpKind[0]), dstReg); - - BasicBlock* label = genCreateTempLabel(); - inst_JMP(jumpKind[0], label); - - // second branch is always to true label - assert(branchToTrueLabel[1]); - inst_SET(jumpKind[1], dstReg); - genDefineTempLabel(label); - } - - var_types treeType = tree->TypeGet(); - if (treeType == TYP_INT || treeType == TYP_LONG) - { - // Set the higher bytes to 0 - inst_RV_RV(ins_Move_Extend(TYP_UBYTE, true), dstReg, dstReg, TYP_UBYTE, emitTypeSize(TYP_UBYTE)); - } - else - { - noway_assert(treeType == TYP_BYTE); - } -} - #if !defined(_TARGET_64BIT_) //------------------------------------------------------------------------ // genLongToIntCast: Generate code for long to int casts on x86. diff --git a/src/jit/emitjmps.h b/src/jit/emitjmps.h index 60815d13ea..af2d36bc62 100644 --- a/src/jit/emitjmps.h +++ b/src/jit/emitjmps.h @@ -21,8 +21,8 @@ JMP_SMALL(jbe , ja , jbe ) JMP_SMALL(ja , jbe , ja ) JMP_SMALL(js , jns , js ) JMP_SMALL(jns , js , jns ) -JMP_SMALL(jpe , jpo , jpe ) -JMP_SMALL(jpo , jpe , jpo ) +JMP_SMALL(jp , jnp , jp ) +JMP_SMALL(jnp , jp , jnp ) JMP_SMALL(jl , jge , jl ) JMP_SMALL(jge , jl , jge ) JMP_SMALL(jle , jg , jle ) diff --git a/src/jit/emitxarch.cpp b/src/jit/emitxarch.cpp index a7e40407b9..73a1632099 100644 --- a/src/jit/emitxarch.cpp +++ b/src/jit/emitxarch.cpp @@ -11088,8 +11088,8 @@ BYTE* emitter::emitOutputR(BYTE* dst, instrDesc* id) case INS_seta: case INS_sets: case INS_setns: - case INS_setpe: - case INS_setpo: + case INS_setp: + case INS_setnp: case INS_setl: case INS_setge: case INS_setle: @@ -12279,8 +12279,8 @@ BYTE* emitter::emitOutputLJ(BYTE* dst, instrDesc* i) assert(INS_ja + (INS_l_jmp - INS_jmp) == INS_l_ja); assert(INS_js + (INS_l_jmp - INS_jmp) == INS_l_js); assert(INS_jns + (INS_l_jmp - INS_jmp) == INS_l_jns); - assert(INS_jpe + (INS_l_jmp - INS_jmp) == INS_l_jpe); - assert(INS_jpo + (INS_l_jmp - INS_jmp) == INS_l_jpo); + assert(INS_jp + (INS_l_jmp - INS_jmp) == INS_l_jp); + 
assert(INS_jnp + (INS_l_jmp - INS_jmp) == INS_l_jnp); assert(INS_jl + (INS_l_jmp - INS_jmp) == INS_l_jl); assert(INS_jge + (INS_l_jmp - INS_jmp) == INS_l_jge); assert(INS_jle + (INS_l_jmp - INS_jmp) == INS_l_jle); diff --git a/src/jit/gentree.cpp b/src/jit/gentree.cpp index 3783ea8d36..d9831113f2 100644 --- a/src/jit/gentree.cpp +++ b/src/jit/gentree.cpp @@ -2275,7 +2275,7 @@ GenTree* Compiler::gtReverseCond(GenTree* tree) else if (tree->OperIs(GT_JCC, GT_SETCC)) { GenTreeCC* cc = tree->AsCC(); - cc->gtCondition = GenTree::ReverseRelop(cc->gtCondition); + cc->gtCondition = GenCondition::Reverse(cc->gtCondition); } else if (tree->OperIs(GT_JCMP)) { @@ -10400,7 +10400,7 @@ void Compiler::gtDispLeaf(GenTree* tree, IndentStack* indentStack) case GT_JCC: case GT_SETCC: - printf(" cond=%s", GenTree::OpName(tree->AsCC()->gtCondition)); + printf(" cond=%s", tree->AsCC()->gtCondition.Name()); break; case GT_JCMP: printf(" cond=%s%s", (tree->gtFlags & GTF_JCMP_TST) ? "TEST_" : "", diff --git a/src/jit/gentree.h b/src/jit/gentree.h index 2405e822ce..49c5682df0 100644 --- a/src/jit/gentree.h +++ b/src/jit/gentree.h @@ -5581,17 +5581,246 @@ struct GenTreeRuntimeLookup final : public GenTreeUnOp } }; +// Represents the condition of a GT_JCC or GT_SETCC node. + +struct GenCondition +{ + // clang-format off + enum Code : unsigned char + { + OperMask = 7, + Unsigned = 8, + Unordered = Unsigned, + Float = 16, + + // 0 would be the encoding of "signed EQ" but since equality is sign insensitive + // we'll use 0 as invalid/uninitialized condition code. This will also leave 1 + // as a spare code. 
+ NONE = 0, + + SLT = 2, + SLE = 3, + SGE = 4, + SGT = 5, + S = 6, + NS = 7, + + EQ = Unsigned | 0, // = 8 + NE = Unsigned | 1, // = 9 + ULT = Unsigned | SLT, // = 10 + ULE = Unsigned | SLE, // = 11 + UGE = Unsigned | SGE, // = 12 + UGT = Unsigned | SGT, // = 13 + C = Unsigned | S, // = 14 + NC = Unsigned | NS, // = 15 + + FEQ = Float | 0, // = 16 + FNE = Float | 1, // = 17 + FLT = Float | SLT, // = 18 + FLE = Float | SLE, // = 19 + FGE = Float | SGE, // = 20 + FGT = Float | SGT, // = 21 + O = Float | S, // = 22 + NO = Float | NS, // = 23 + + FEQU = Unordered | FEQ, // = 24 + FNEU = Unordered | FNE, // = 25 + FLTU = Unordered | FLT, // = 26 + FLEU = Unordered | FLE, // = 27 + FGEU = Unordered | FGE, // = 28 + FGTU = Unordered | FGT, // = 29 + P = Unordered | O, // = 30 + NP = Unordered | NO, // = 31 + }; + // clang-format on + +private: + Code m_code; + +public: + Code GetCode() const + { + return m_code; + } + + bool IsFlag() const + { + return (m_code & OperMask) >= S; + } + + bool IsUnsigned() const + { + return (ULT <= m_code) && (m_code <= UGT); + } + + bool IsFloat() const + { + return !IsFlag() && (m_code & Float) != 0; + } + + bool IsUnordered() const + { + return !IsFlag() && (m_code & (Float | Unordered)) == (Float | Unordered); + } + + bool Is(Code cond) const + { + return m_code == cond; + } + + template <typename... TRest> + bool Is(Code c, TRest... rest) const + { + return Is(c) || Is(rest...); + } + + // Indicate whether the condition should be swapped in order to avoid generating + // multiple branches. This happens for certain floating point conditions on XARCH, + // see GenConditionDesc and its associated mapping table for more details. 
+ bool PreferSwap() const + { +#ifdef _TARGET_XARCH_ + return Is(GenCondition::FLT, GenCondition::FLE, GenCondition::FGTU, GenCondition::FGEU); +#else + return false; +#endif + } + + const char* Name() const + { + // clang-format off + static const char* names[] + { + "NONE", "???", "SLT", "SLE", "SGE", "SGT", "S", "NS", + "UEQ", "UNE", "ULT", "ULE", "UGE", "UGT", "C", "NC", + "FEQ", "FNE", "FLT", "FLE", "FGE", "FGT", "O", "NO", + "FEQU", "FNEU", "FLTU", "FLEU", "FGEU", "FGTU", "P", "NP" + }; + // clang-format on + + assert(m_code < _countof(names)); + return names[m_code]; + } + + GenCondition() : m_code() + { + } + + GenCondition(Code cond) : m_code(cond) + { + } + + static_assert((GT_NE - GT_EQ) == (NE & ~Unsigned), "bad relop"); + static_assert((GT_LT - GT_EQ) == SLT, "bad relop"); + static_assert((GT_LE - GT_EQ) == SLE, "bad relop"); + static_assert((GT_GE - GT_EQ) == SGE, "bad relop"); + static_assert((GT_GT - GT_EQ) == SGT, "bad relop"); + static_assert((GT_TEST_NE - GT_TEST_EQ) == (NE & ~Unsigned), "bad relop"); + + static GenCondition FromRelop(GenTree* relop) + { + assert(relop->OperIsCompare()); + + if (varTypeIsFloating(relop->gtGetOp1())) + { + return FromFloatRelop(relop); + } + else + { + return FromIntegralRelop(relop); + } + } + + static GenCondition FromFloatRelop(GenTree* relop) + { + assert(varTypeIsFloating(relop->gtGetOp1()) && varTypeIsFloating(relop->gtGetOp2())); + + return FromFloatRelop(relop->OperGet(), (relop->gtFlags & GTF_RELOP_NAN_UN) != 0); + } + + static GenCondition FromFloatRelop(genTreeOps oper, bool isUnordered) + { + assert(GenTree::OperIsCompare(oper)); + + unsigned code = oper - GT_EQ; + assert(code <= SGT); + code |= Float; + + if (isUnordered) + { + code |= Unordered; + } + + return GenCondition(static_cast<Code>(code)); + } + + static GenCondition FromIntegralRelop(GenTree* relop) + { + assert(!varTypeIsFloating(relop->gtGetOp1()) && !varTypeIsFloating(relop->gtGetOp2())); + + return FromIntegralRelop(relop->OperGet(), 
relop->IsUnsigned()); + } + + static GenCondition FromIntegralRelop(genTreeOps oper, bool isUnsigned) + { + assert(GenTree::OperIsCompare(oper)); + + // GT_TEST_EQ/NE are special, they need to be mapped as GT_EQ/NE + unsigned code = oper - ((oper >= GT_TEST_EQ) ? GT_TEST_EQ : GT_EQ); + + if (isUnsigned || (code <= 1)) // EQ/NE are treated as unsigned + { + code |= Unsigned; + } + + return GenCondition(static_cast<Code>(code)); + } + + static GenCondition Reverse(GenCondition condition) + { + // clang-format off + static const Code reverse[] + { + // EQ NE LT LE GE GT F NF + NONE, NONE, SGE, SGT, SLT, SLE, NS, S, + NE, EQ, UGE, UGT, ULT, ULE, NC, C, + FNEU, FEQU, FGEU, FGTU, FLTU, FLEU, NO, O, + FNE, FEQ, FGE, FGT, FLT, FLE, NP, P + }; + // clang-format on + + assert(condition.m_code < _countof(reverse)); + return GenCondition(reverse[condition.m_code]); + } + + static GenCondition Swap(GenCondition condition) + { + // clang-format off + static const Code swap[] + { + // EQ NE LT LE GE GT F NF + NONE, NONE, SGT, SGE, SLE, SLT, S, NS, + EQ, NE, UGT, UGE, ULE, ULT, C, NC, + FEQ, FNE, FGT, FGE, FLE, FLT, O, NO, + FEQU, FNEU, FGTU, FGEU, FLEU, FLTU, P, NP + }; + // clang-format on + + assert(condition.m_code < _countof(swap)); + return GenCondition(swap[condition.m_code]); + } +}; + + // Represents a GT_JCC or GT_SETCC node. 
struct GenTreeCC final : public GenTree { - genTreeOps gtCondition; // any relop + GenCondition gtCondition; - GenTreeCC(genTreeOps oper, genTreeOps condition, var_types type = TYP_VOID) + GenTreeCC(genTreeOps oper, GenCondition condition, var_types type = TYP_VOID) : GenTree(oper, type DEBUGARG(/*largeNode*/ FALSE)), gtCondition(condition) { assert(OperIs(GT_JCC, GT_SETCC)); - assert(OperIsCompare(condition)); } #if DEBUGGABLE_GENTREE diff --git a/src/jit/hwintrinsiccodegenxarch.cpp b/src/jit/hwintrinsiccodegenxarch.cpp index 71a60fc3f5..84ca4779c5 100644 --- a/src/jit/hwintrinsiccodegenxarch.cpp +++ b/src/jit/hwintrinsiccodegenxarch.cpp @@ -1437,7 +1437,7 @@ void CodeGen::genSSEIntrinsic(GenTreeHWIntrinsic* node) assert(tmpReg != targetReg); genHWIntrinsic_R_RM(node, ins, emitTypeSize(TYP_SIMD16)); - emit->emitIns_R(INS_setpo, EA_1BYTE, targetReg); + emit->emitIns_R(INS_setnp, EA_1BYTE, targetReg); emit->emitIns_R(INS_sete, EA_1BYTE, tmpReg); emit->emitIns_R_R(INS_and, EA_1BYTE, tmpReg, targetReg); emit->emitIns_R_R(INS_movzx, EA_1BYTE, targetReg, tmpReg); @@ -1503,7 +1503,7 @@ void CodeGen::genSSEIntrinsic(GenTreeHWIntrinsic* node) assert(tmpReg != targetReg); genHWIntrinsic_R_RM(node, ins, emitTypeSize(TYP_SIMD16)); - emit->emitIns_R(INS_setpe, EA_1BYTE, targetReg); + emit->emitIns_R(INS_setp, EA_1BYTE, targetReg); emit->emitIns_R(INS_setne, EA_1BYTE, tmpReg); emit->emitIns_R_R(INS_or, EA_1BYTE, tmpReg, targetReg); emit->emitIns_R_R(INS_movzx, EA_1BYTE, targetReg, tmpReg); @@ -1615,7 +1615,7 @@ void CodeGen::genSSE2Intrinsic(GenTreeHWIntrinsic* node) assert(tmpReg != targetReg); genHWIntrinsic_R_RM(node, ins, emitTypeSize(TYP_SIMD16)); - emit->emitIns_R(INS_setpo, EA_1BYTE, targetReg); + emit->emitIns_R(INS_setnp, EA_1BYTE, targetReg); emit->emitIns_R(INS_sete, EA_1BYTE, tmpReg); emit->emitIns_R_R(INS_and, EA_1BYTE, tmpReg, targetReg); emit->emitIns_R_R(INS_movzx, EA_1BYTE, targetReg, tmpReg); @@ -1681,7 +1681,7 @@ void 
CodeGen::genSSE2Intrinsic(GenTreeHWIntrinsic* node) assert(tmpReg != targetReg); genHWIntrinsic_R_RM(node, ins, emitTypeSize(TYP_SIMD16)); - emit->emitIns_R(INS_setpe, EA_1BYTE, targetReg); + emit->emitIns_R(INS_setp, EA_1BYTE, targetReg); emit->emitIns_R(INS_setne, EA_1BYTE, tmpReg); emit->emitIns_R_R(INS_or, EA_1BYTE, tmpReg, targetReg); emit->emitIns_R_R(INS_movzx, EA_1BYTE, targetReg, tmpReg); diff --git a/src/jit/instr.cpp b/src/jit/instr.cpp index ce9e8d6fe3..cbab22be76 100644 --- a/src/jit/instr.cpp +++ b/src/jit/instr.cpp @@ -314,11 +314,11 @@ void CodeGen::inst_SET(emitJumpKind condition, regNumber reg) ins = INS_seta; break; - case EJ_jpe: - ins = INS_setpe; + case EJ_jp: + ins = INS_setp; break; - case EJ_jpo: - ins = INS_setpo; + case EJ_jnp: + ins = INS_setnp; break; default: diff --git a/src/jit/instrsxarch.h b/src/jit/instrsxarch.h index 27056cd241..2a8a54dd4c 100644 --- a/src/jit/instrsxarch.h +++ b/src/jit/instrsxarch.h @@ -103,8 +103,8 @@ INST3(cmovbe, "cmovbe", IUM_WR, BAD_CODE, BAD_CODE, INST3(cmova, "cmova", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0047, INS_FLAGS_ReadsFlags) INST3(cmovs, "cmovs", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0048, INS_FLAGS_ReadsFlags) INST3(cmovns, "cmovns", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0049, INS_FLAGS_ReadsFlags) -INST3(cmovpe, "cmovpe", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004A, INS_FLAGS_ReadsFlags) -INST3(cmovpo, "cmovpo", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004B, INS_FLAGS_ReadsFlags) +INST3(cmovp, "cmovp", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004A, INS_FLAGS_ReadsFlags) +INST3(cmovnp, "cmovnp", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004B, INS_FLAGS_ReadsFlags) INST3(cmovl, "cmovl", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004C, INS_FLAGS_ReadsFlags) INST3(cmovge, "cmovge", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004D, INS_FLAGS_ReadsFlags) INST3(cmovle, "cmovle", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004E, INS_FLAGS_ReadsFlags) @@ -706,8 +706,8 @@ INST1(setbe, "setbe", IUM_WR, 0x0F0096, INST1(seta, "seta", IUM_WR, 0x0F0097, INS_FLAGS_ReadsFlags) INST1(sets, "sets", 
IUM_WR, 0x0F0098, INS_FLAGS_ReadsFlags) INST1(setns, "setns", IUM_WR, 0x0F0099, INS_FLAGS_ReadsFlags) -INST1(setpe, "setpe", IUM_WR, 0x0F009A, INS_FLAGS_ReadsFlags) -INST1(setpo, "setpo", IUM_WR, 0x0F009B, INS_FLAGS_ReadsFlags) +INST1(setp, "setp", IUM_WR, 0x0F009A, INS_FLAGS_ReadsFlags) +INST1(setnp, "setnp", IUM_WR, 0x0F009B, INS_FLAGS_ReadsFlags) INST1(setl, "setl", IUM_WR, 0x0F009C, INS_FLAGS_ReadsFlags) INST1(setge, "setge", IUM_WR, 0x0F009D, INS_FLAGS_ReadsFlags) INST1(setle, "setle", IUM_WR, 0x0F009E, INS_FLAGS_ReadsFlags) @@ -732,8 +732,8 @@ INST0(jbe, "jbe", IUM_RD, 0x000076, INST0(ja, "ja", IUM_RD, 0x000077, INS_FLAGS_ReadsFlags) INST0(js, "js", IUM_RD, 0x000078, INS_FLAGS_ReadsFlags) INST0(jns, "jns", IUM_RD, 0x000079, INS_FLAGS_ReadsFlags) -INST0(jpe, "jpe", IUM_RD, 0x00007A, INS_FLAGS_ReadsFlags) -INST0(jpo, "jpo", IUM_RD, 0x00007B, INS_FLAGS_ReadsFlags) +INST0(jp, "jp", IUM_RD, 0x00007A, INS_FLAGS_ReadsFlags) +INST0(jnp, "jnp", IUM_RD, 0x00007B, INS_FLAGS_ReadsFlags) INST0(jl, "jl", IUM_RD, 0x00007C, INS_FLAGS_ReadsFlags) INST0(jge, "jge", IUM_RD, 0x00007D, INS_FLAGS_ReadsFlags) INST0(jle, "jle", IUM_RD, 0x00007E, INS_FLAGS_ReadsFlags) @@ -750,8 +750,8 @@ INST0(l_jbe, "jbe", IUM_RD, 0x00860F, INST0(l_ja, "ja", IUM_RD, 0x00870F, INS_FLAGS_ReadsFlags) INST0(l_js, "js", IUM_RD, 0x00880F, INS_FLAGS_ReadsFlags) INST0(l_jns, "jns", IUM_RD, 0x00890F, INS_FLAGS_ReadsFlags) -INST0(l_jpe, "jpe", IUM_RD, 0x008A0F, INS_FLAGS_ReadsFlags) -INST0(l_jpo, "jpo", IUM_RD, 0x008B0F, INS_FLAGS_ReadsFlags) +INST0(l_jp, "jp", IUM_RD, 0x008A0F, INS_FLAGS_ReadsFlags) +INST0(l_jnp, "jnp", IUM_RD, 0x008B0F, INS_FLAGS_ReadsFlags) INST0(l_jl, "jl", IUM_RD, 0x008C0F, INS_FLAGS_ReadsFlags) INST0(l_jge, "jge", IUM_RD, 0x008D0F, INS_FLAGS_ReadsFlags) INST0(l_jle, "jle", IUM_RD, 0x008E0F, INS_FLAGS_ReadsFlags) diff --git a/src/jit/lower.cpp b/src/jit/lower.cpp index 2ea2c9a5cf..55f44a71e0 100644 --- a/src/jit/lower.cpp +++ b/src/jit/lower.cpp @@ -914,16 +914,16 @@ bool 
Lowering::TryLowerSwitchToBitTest( // Rewire the blocks as needed and figure out the condition to use for JCC. // - genTreeOps bbSwitchCondition = GT_NONE; - bbSwitch->bbJumpKind = BBJ_COND; + GenCondition bbSwitchCondition; + bbSwitch->bbJumpKind = BBJ_COND; comp->fgRemoveAllRefPreds(bbCase1, bbSwitch); comp->fgRemoveAllRefPreds(bbCase0, bbSwitch); if (bbSwitch->bbNext == bbCase0) { - // GT_LT + GTF_UNSIGNED generates JC so we jump to bbCase1 when the bit is set - bbSwitchCondition = GT_LT; + // GenCondition::C generates JC so we jump to bbCase1 when the bit is set + bbSwitchCondition = GenCondition::C; bbSwitch->bbJumpDest = bbCase1; comp->fgAddRefPred(bbCase0, bbSwitch); @@ -933,8 +933,8 @@ bool Lowering::TryLowerSwitchToBitTest( { assert(bbSwitch->bbNext == bbCase1); - // GT_GE + GTF_UNSIGNED generates JNC so we jump to bbCase0 when the bit is not set - bbSwitchCondition = GT_GE; + // GenCondition::NC generates JNC so we jump to bbCase0 when the bit is not set + bbSwitchCondition = GenCondition::NC; bbSwitch->bbJumpDest = bbCase0; comp->fgAddRefPred(bbCase0, bbSwitch); @@ -950,7 +950,7 @@ bool Lowering::TryLowerSwitchToBitTest( GenTree* bitTest = comp->gtNewOperNode(GT_BT, TYP_VOID, bitTableIcon, switchValue); bitTest->gtFlags |= GTF_SET_FLAGS; GenTreeCC* jcc = new (comp, GT_JCC) GenTreeCC(GT_JCC, bbSwitchCondition); - jcc->gtFlags |= GTF_UNSIGNED | GTF_USE_FLAGS; + jcc->gtFlags |= GTF_USE_FLAGS; LIR::AsRange(bbSwitch).InsertAfter(switchValue, bitTableIcon, bitTest, jcc); @@ -2523,8 +2523,8 @@ GenTree* Lowering::DecomposeLongCompare(GenTree* cmp) GenTree* jcc = cmpUse.User(); jcc->gtOp.gtOp1 = nullptr; jcc->ChangeOper(GT_JCC); - jcc->gtFlags |= (cmp->gtFlags & GTF_UNSIGNED) | GTF_USE_FLAGS; - jcc->AsCC()->gtCondition = condition; + jcc->gtFlags |= GTF_USE_FLAGS; + jcc->AsCC()->gtCondition = GenCondition::FromIntegralRelop(condition, cmp->IsUnsigned()); } else { @@ -2532,7 +2532,7 @@ GenTree* Lowering::DecomposeLongCompare(GenTree* cmp) cmp->gtOp.gtOp2 = 
nullptr; cmp->ChangeOper(GT_SETCC); cmp->gtFlags |= GTF_USE_FLAGS; - cmp->AsCC()->gtCondition = condition; + cmp->AsCC()->gtCondition = GenCondition::FromIntegralRelop(condition, cmp->IsUnsigned()); } return cmp->gtNext; @@ -2734,7 +2734,7 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp) if (lsh->OperIs(GT_LSH) && varTypeIsIntOrI(lsh->TypeGet()) && lsh->gtGetOp1()->IsIntegralConst(1) && BlockRange().TryGetUse(cmp, &cmpUse)) { - genTreeOps condition = cmp->OperIs(GT_TEST_NE) ? GT_LT : GT_GE; + GenCondition condition = cmp->OperIs(GT_TEST_NE) ? GenCondition::C : GenCondition::NC; cmp->SetOper(GT_BT); cmp->gtType = TYP_VOID; @@ -2760,7 +2760,7 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp) cmpUse.ReplaceWith(comp, cc); } - cc->gtFlags |= GTF_USE_FLAGS | GTF_UNSIGNED; + cc->gtFlags |= GTF_USE_FLAGS; return cmp->gtNext; } @@ -2817,10 +2817,10 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp) ccOp = GT_SETCC; } - genTreeOps condition = cmp->OperGet(); + GenCondition condition = GenCondition::FromIntegralRelop(cmp); cc->ChangeOper(ccOp); cc->AsCC()->gtCondition = condition; - cc->gtFlags |= GTF_USE_FLAGS | (cmp->gtFlags & GTF_UNSIGNED); + cc->gtFlags |= GTF_USE_FLAGS; return next; } diff --git a/src/jit/lowerxarch.cpp b/src/jit/lowerxarch.cpp index 914b6a6ba3..0168e68406 100644 --- a/src/jit/lowerxarch.cpp +++ b/src/jit/lowerxarch.cpp @@ -836,7 +836,7 @@ void Lowering::LowerSIMD(GenTreeSIMD* simdNode) jtrue->ChangeOper(GT_JCC); GenTreeCC* jcc = jtrue->AsCC(); jcc->gtFlags |= GTF_USE_FLAGS; - jcc->gtCondition = (relopOp2Value == 0) ? GT_NE : GT_EQ; + jcc->gtCondition = (relopOp2Value == 0) ? GenCondition::NE : GenCondition::EQ; BlockRange().Remove(simdUser->gtGetOp2()); BlockRange().Remove(simdUser); @@ -854,8 +854,9 @@ void Lowering::LowerSIMD(GenTreeSIMD* simdNode) // to have to handle 2 cases (set flags/set destination register). // - genTreeOps condition = (simdNode->gtSIMDIntrinsicID == SIMDIntrinsicOpEquality) ? 
GT_EQ : GT_NE; - GenTreeCC* setcc = new (comp, GT_SETCC) GenTreeCC(GT_SETCC, condition, simdNode->TypeGet()); + GenCondition condition = + (simdNode->gtSIMDIntrinsicID == SIMDIntrinsicOpEquality) ? GenCondition::EQ : GenCondition::NE; + GenTreeCC* setcc = new (comp, GT_SETCC) GenTreeCC(GT_SETCC, condition, simdNode->TypeGet()); setcc->gtFlags |= GTF_USE_FLAGS; BlockRange().InsertAfter(simdNode, setcc); simdUse.ReplaceWith(comp, setcc); @@ -1892,19 +1893,8 @@ void Lowering::ContainCheckCompare(GenTreeOp* cmp) // The type of the operands has to be the same and no implicit conversions at this stage. assert(op1Type == op2Type); - bool reverseOps; - if ((cmp->gtFlags & GTF_RELOP_NAN_UN) != 0) - { - // Unordered comparison case - reverseOps = cmp->OperIs(GT_GT, GT_GE); - } - else - { - reverseOps = cmp->OperIs(GT_LT, GT_LE); - } - GenTree* otherOp; - if (reverseOps) + if (GenCondition::FromFloatRelop(cmp).PreferSwap()) { otherOp = op1; } |