summaryrefslogtreecommitdiff
path: root/src/jit/codegenarm.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/jit/codegenarm.cpp')
-rw-r--r--src/jit/codegenarm.cpp1185
1 files changed, 1015 insertions, 170 deletions
diff --git a/src/jit/codegenarm.cpp b/src/jit/codegenarm.cpp
index 73e51f2ef7..81f5889e3f 100644
--- a/src/jit/codegenarm.cpp
+++ b/src/jit/codegenarm.cpp
@@ -23,15 +23,9 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "gcinfo.h"
#include "emit.h"
-#ifndef JIT32_GCENCODER
-#include "gcinfoencoder.h"
-#endif
-
-/*****************************************************************************
- *
- * Generate code that will set the given register to the integer constant.
- */
-
+//------------------------------------------------------------------------
+// genSetRegToIcon: Generate code that will set the given register to the integer constant.
+//
void CodeGen::genSetRegToIcon(regNumber reg, ssize_t val, var_types type, insFlags flags)
{
// Reg cannot be a FP reg
@@ -42,41 +36,78 @@ void CodeGen::genSetRegToIcon(regNumber reg, ssize_t val, var_types type, insFla
// code path.
noway_assert(type != TYP_REF || val == 0);
- if (val == 0)
- {
- instGen_Set_Reg_To_Zero(emitActualTypeSize(type), reg, flags);
- }
- else
- {
- // TODO-CQ: needs all the optimized cases
- getEmitter()->emitIns_R_I(INS_mov, emitActualTypeSize(type), reg, val);
- }
+ instGen_Set_Reg_To_Imm(emitActualTypeSize(type), reg, val, flags);
}
-/*****************************************************************************
- *
- * Generate code to check that the GS cookie wasn't thrashed by a buffer
- * overrun. If pushReg is true, preserve all registers around code sequence.
- * Otherwise, ECX maybe modified.
- */
+//------------------------------------------------------------------------
+// genEmitGSCookieCheck: Generate code to check that the GS cookie wasn't thrashed by a buffer overrun.
+//
void CodeGen::genEmitGSCookieCheck(bool pushReg)
{
NYI("ARM genEmitGSCookieCheck");
}
-BasicBlock* CodeGen::genCallFinally(BasicBlock* block, BasicBlock* lblk)
+//------------------------------------------------------------------------
+// genCallFinally: Generate a call to the finally block.
+//
+BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
{
NYI("ARM genCallFinally");
return block;
}
-// move an immediate value into an integer register
-
+//------------------------------------------------------------------------
+// genEHCatchRet: Generate the EH catch-return sequence (currently NYI for ARM).
void CodeGen::genEHCatchRet(BasicBlock* block)
{
NYI("ARM genEHCatchRet");
}
+//---------------------------------------------------------------------
+// genIntrinsic - generate code for a given intrinsic
+//
+// Arguments
+// treeNode - the GT_INTRINSIC node
+//
+// Return value:
+// None
+//
+void CodeGen::genIntrinsic(GenTreePtr treeNode)
+{
+ // Both operand and its result must be of the same floating point type.
+ GenTreePtr srcNode = treeNode->gtOp.gtOp1;
+ assert(varTypeIsFloating(srcNode));
+ assert(srcNode->TypeGet() == treeNode->TypeGet());
+
+ // Right now only Abs/Round/Sqrt are treated as math intrinsics.
+ //
+ switch (treeNode->gtIntrinsic.gtIntrinsicId)
+ {
+ case CORINFO_INTRINSIC_Abs:
+ genConsumeOperands(treeNode->AsOp());
+ getEmitter()->emitInsBinary(INS_vabs, emitTypeSize(treeNode), treeNode, srcNode);
+ break;
+
+ case CORINFO_INTRINSIC_Round:
+ NYI_ARM("genIntrinsic for round - not implemented yet");
+ break;
+
+ case CORINFO_INTRINSIC_Sqrt:
+ genConsumeOperands(treeNode->AsOp());
+ getEmitter()->emitInsBinary(INS_vsqrt, emitTypeSize(treeNode), treeNode, srcNode);
+ break;
+
+ default:
+ assert(!"genIntrinsic: Unsupported intrinsic");
+ unreached();
+ }
+
+ genProduceReg(treeNode);
+}
+
+//------------------------------------------------------------------------
+// instGen_Set_Reg_To_Imm: Move an immediate value into an integer register.
+//
void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, regNumber reg, ssize_t imm, insFlags flags)
{
// reg cannot be a FP register
@@ -87,23 +118,60 @@ void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, regNumber reg, ssize_t imm,
size = EA_SIZE(size); // Strip any Reloc flags from size if we aren't doing relocs
}
- if ((imm == 0) && !EA_IS_RELOC(size))
+ if (EA_IS_RELOC(size))
+ {
+ getEmitter()->emitIns_R_I(INS_movw, size, reg, imm);
+ getEmitter()->emitIns_R_I(INS_movt, size, reg, imm);
+ }
+ else if (imm == 0)
{
instGen_Set_Reg_To_Zero(size, reg, flags);
}
else
{
- getEmitter()->emitIns_R_I(INS_mov, size, reg, imm);
+ if (arm_Valid_Imm_For_Mov(imm))
+ {
+ getEmitter()->emitIns_R_I(INS_mov, size, reg, imm, flags);
+ }
+ else // We have to use a movw/movt pair of instructions
+ {
+ ssize_t imm_lo16 = (imm & 0xffff);
+ ssize_t imm_hi16 = (imm >> 16) & 0xffff;
+
+ assert(arm_Valid_Imm_For_Mov(imm_lo16));
+ assert(imm_hi16 != 0);
+
+ getEmitter()->emitIns_R_I(INS_movw, size, reg, imm_lo16);
+
+ // If we've got a low register, the high word is all bits set,
+ // and the high bit of the low word is set, we can sign extend
+ // halfword and save two bytes of encoding. This can happen for
+ // small magnitude negative numbers 'n' for -32768 <= n <= -1.
+
+ if (getEmitter()->isLowRegister(reg) && (imm_hi16 == 0xffff) && ((imm_lo16 & 0x8000) == 0x8000))
+ {
+ getEmitter()->emitIns_R_R(INS_sxth, EA_2BYTE, reg, reg);
+ }
+ else
+ {
+ getEmitter()->emitIns_R_I(INS_movt, size, reg, imm_hi16);
+ }
+
+ if (flags == INS_FLAGS_SET)
+ getEmitter()->emitIns_R_R(INS_mov, size, reg, reg, INS_FLAGS_SET);
+ }
}
+
regTracker.rsTrackRegIntCns(reg, imm);
}
-/*****************************************************************************
- *
- * Generate code to set a register 'targetReg' of type 'targetType' to the constant
- * specified by the constant (GT_CNS_INT or GT_CNS_DBL) in 'tree'. This does not call
- * genProduceReg() on the target register.
- */
+//------------------------------------------------------------------------
+// genSetRegToConst: Generate code to set a register 'targetReg' of type 'targetType'
+// to the constant specified by the constant (GT_CNS_INT or GT_CNS_DBL) in 'tree'.
+//
+// Notes:
+// This does not call genProduceReg() on the target register.
+//
void CodeGen::genSetRegToConst(regNumber targetReg, var_types targetType, GenTreePtr tree)
{
switch (tree->gtOper)
@@ -130,7 +198,42 @@ void CodeGen::genSetRegToConst(regNumber targetReg, var_types targetType, GenTre
case GT_CNS_DBL:
{
- NYI("GT_CNS_DBL");
+ GenTreeDblCon* dblConst = tree->AsDblCon();
+ double constValue = dblConst->gtDblCon.gtDconVal;
+ // TODO-ARM-CQ: Do we have a faster/smaller way to generate 0.0 in thumb2 ISA ?
+ if (targetType == TYP_FLOAT)
+ {
+ // Get a temp integer register
+ regMaskTP tmpRegMask = tree->gtRsvdRegs;
+ regNumber tmpReg = genRegNumFromMask(tmpRegMask);
+ assert(tmpReg != REG_NA);
+
+ float f = forceCastToFloat(constValue);
+ genSetRegToIcon(tmpReg, *((int*)(&f)));
+ getEmitter()->emitIns_R_R(INS_vmov_i2f, EA_4BYTE, targetReg, tmpReg);
+ }
+ else
+ {
+ assert(targetType == TYP_DOUBLE);
+
+ unsigned* cv = (unsigned*)&constValue;
+
+ // Get two temp integer registers
+ regMaskTP tmpRegsMask = tree->gtRsvdRegs;
+ regMaskTP tmpRegMask = genFindHighestBit(tmpRegsMask); // set tmpRegMask to a one-bit mask
+ regNumber tmpReg1 = genRegNumFromMask(tmpRegMask);
+ assert(tmpReg1 != REG_NA);
+
+ tmpRegsMask &= ~genRegMask(tmpReg1); // remove the bit for 'tmpReg1'
+ tmpRegMask = genFindHighestBit(tmpRegsMask); // set tmpRegMask to a one-bit mask
+ regNumber tmpReg2 = genRegNumFromMask(tmpRegMask);
+ assert(tmpReg2 != REG_NA);
+
+ genSetRegToIcon(tmpReg1, cv[0]);
+ genSetRegToIcon(tmpReg2, cv[1]);
+
+ getEmitter()->emitIns_R_R_R(INS_vmov_i2d, EA_8BYTE, targetReg, tmpReg1, tmpReg2);
+ }
}
break;
@@ -139,18 +242,22 @@ void CodeGen::genSetRegToConst(regNumber targetReg, var_types targetType, GenTre
}
}
-/*****************************************************************************
- *
- * Generate code for a single node in the tree.
- * Preconditions: All operands have been evaluated
- *
- */
+//------------------------------------------------------------------------
+// genCodeForTreeNode: Generate code for a single node in the tree.
+//
+// Preconditions:
+// All operands have been evaluated.
+//
void CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
{
regNumber targetReg = treeNode->gtRegNum;
var_types targetType = treeNode->TypeGet();
emitter* emit = getEmitter();
+#ifdef DEBUG
+ lastConsumedNode = nullptr;
+#endif
+
JITDUMP("Generating: ");
DISPNODE(treeNode);
@@ -169,10 +276,33 @@ void CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
genProduceReg(treeNode);
break;
- case GT_NEG:
case GT_NOT:
+ assert(!varTypeIsFloating(targetType));
+
+ __fallthrough;
+
+ case GT_NEG:
{
- NYI("GT_NEG and GT_NOT");
+ instruction ins = genGetInsForOper(treeNode->OperGet(), targetType);
+
+ // The arithmetic node must be sitting in a register (since it's not contained)
+ assert(!treeNode->isContained());
+ // The dst can only be a register.
+ assert(targetReg != REG_NA);
+
+ GenTreePtr operand = treeNode->gtGetOp1();
+ assert(!operand->isContained());
+ // The src must be a register.
+ regNumber operandReg = genConsumeReg(operand);
+
+ if (ins == INS_vneg)
+ {
+ getEmitter()->emitIns_R_R(ins, emitTypeSize(treeNode), targetReg, operandReg);
+ }
+ else
+ {
+ getEmitter()->emitIns_R_R_I(ins, emitTypeSize(treeNode), targetReg, operandReg, 0);
+ }
}
genProduceReg(treeNode);
break;
@@ -185,9 +315,10 @@ void CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
case GT_ADD:
case GT_SUB:
+ case GT_MUL:
{
const genTreeOps oper = treeNode->OperGet();
- if ((oper == GT_ADD || oper == GT_SUB) && treeNode->gtOverflow())
+ if ((oper == GT_ADD || oper == GT_SUB || oper == GT_MUL) && treeNode->gtOverflow())
{
// This is also checked in the importer.
NYI("Overflow not yet implemented");
@@ -209,40 +340,47 @@ void CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
genConsumeIfReg(op1);
genConsumeIfReg(op2);
- // This is the case of reg1 = reg1 op reg2
- // We're ready to emit the instruction without any moves
- if (op1reg == targetReg)
+ if (!varTypeIsFloating(targetType))
{
- dst = op1;
- src = op2;
- }
- // We have reg1 = reg2 op reg1
- // In order for this operation to be correct
- // we need that op is a commutative operation so
- // we can convert it into reg1 = reg1 op reg2 and emit
- // the same code as above
- else if (op2reg == targetReg)
- {
- noway_assert(GenTree::OperIsCommutative(treeNode->OperGet()));
- dst = op2;
- src = op1;
+ // This is the case of reg1 = reg1 op reg2
+ // We're ready to emit the instruction without any moves
+ if (op1reg == targetReg)
+ {
+ dst = op1;
+ src = op2;
+ }
+ // We have reg1 = reg2 op reg1
+ // In order for this operation to be correct
+ // we need that op is a commutative operation so
+ // we can convert it into reg1 = reg1 op reg2 and emit
+ // the same code as above
+ else if (op2reg == targetReg)
+ {
+ assert(GenTree::OperIsCommutative(treeNode->OperGet()));
+ dst = op2;
+ src = op1;
+ }
+ // dest, op1 and op2 registers are different:
+ // reg3 = reg1 op reg2
+ // We can implement this by issuing a mov:
+ // reg3 = reg1
+ // reg3 = reg3 op reg2
+ else
+ {
+ inst_RV_RV(ins_Move_Extend(targetType, true), targetReg, op1reg, op1->gtType);
+ regTracker.rsTrackRegCopy(targetReg, op1reg);
+ gcInfo.gcMarkRegPtrVal(targetReg, targetType);
+ dst = treeNode;
+ src = op2;
+ }
+
+ regNumber r = emit->emitInsBinary(ins, emitTypeSize(treeNode), dst, src);
+ assert(r == targetReg);
}
- // dest, op1 and op2 registers are different:
- // reg3 = reg1 op reg2
- // We can implement this by issuing a mov:
- // reg3 = reg1
- // reg3 = reg3 op reg2
else
{
- inst_RV_RV(ins_Move_Extend(targetType, true), targetReg, op1reg, op1->gtType);
- regTracker.rsTrackRegCopy(targetReg, op1reg);
- gcInfo.gcMarkRegPtrVal(targetReg, targetType);
- dst = treeNode;
- src = op2;
+ emit->emitIns_R_R_R(ins, emitTypeSize(treeNode), targetReg, op1reg, op2reg);
}
-
- regNumber r = emit->emitInsBinary(ins, emitTypeSize(treeNode), dst, src);
- noway_assert(r == targetReg);
}
genProduceReg(treeNode);
break;
@@ -429,17 +567,11 @@ void CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
break;
case GT_IND:
+ genConsumeAddress(treeNode->AsIndir()->Addr());
emit->emitInsMov(ins_Load(treeNode->TypeGet()), emitTypeSize(treeNode), treeNode);
genProduceReg(treeNode);
break;
- case GT_MUL:
- {
- NYI("GT_MUL");
- }
- genProduceReg(treeNode);
- break;
-
case GT_MOD:
case GT_UDIV:
case GT_UMOD:
@@ -451,17 +583,45 @@ void CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
case GT_DIV:
{
- NYI("GT_DIV");
- }
+ genConsumeOperands(treeNode->AsOp());
+
+ noway_assert(targetReg != REG_NA);
+
+ GenTreePtr dst = treeNode;
+ GenTreePtr src1 = treeNode->gtGetOp1();
+ GenTreePtr src2 = treeNode->gtGetOp2();
+ instruction ins = genGetInsForOper(treeNode->OperGet(), targetType);
+ emitAttr attr = emitTypeSize(treeNode);
+ regNumber result = REG_NA;
+
+ // dst can only be a reg
+ assert(!dst->isContained());
+
+ // src can be only reg
+ assert(!src1->isContained() || !src2->isContained());
+
+ if (varTypeIsFloating(targetType))
+ {
+ // Floating point divide never raises an exception
+
+ emit->emitIns_R_R_R(ins, attr, dst->gtRegNum, src1->gtRegNum, src2->gtRegNum);
+ }
+ else // a signed integer divide operation
+ {
+ // TODO-ARM-Bug: handle zero division exception.
+
+ emit->emitIns_R_R_R(ins, attr, dst->gtRegNum, src1->gtRegNum, src2->gtRegNum);
+ }
+
genProduceReg(treeNode);
- break;
+ }
+ break;
case GT_INTRINSIC:
{
- NYI("GT_INTRINSIC");
+ genIntrinsic(treeNode);
}
- genProduceReg(treeNode);
- break;
+ break;
case GT_EQ:
case GT_NE:
@@ -485,26 +645,12 @@ void CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
emitAttr cmpAttr;
if (varTypeIsFloating(op1))
{
- NYI("Floating point compare");
-
- bool isUnordered = ((treeNode->gtFlags & GTF_RELOP_NAN_UN) != 0);
- switch (tree->OperGet())
- {
- case GT_EQ:
- ins = INS_beq;
- case GT_NE:
- ins = INS_bne;
- case GT_LT:
- ins = isUnordered ? INS_blt : INS_blo;
- case GT_LE:
- ins = isUnordered ? INS_ble : INS_bls;
- case GT_GE:
- ins = isUnordered ? INS_bpl : INS_bge;
- case GT_GT:
- ins = isUnordered ? INS_bhi : INS_bgt;
- default:
- unreached();
- }
+ assert(op1->TypeGet() == op2->TypeGet());
+ ins = INS_vcmp;
+ cmpAttr = emitTypeSize(op1->TypeGet());
+ emit->emitInsBinary(ins, cmpAttr, op1, op2);
+ // vmrs with register 0xf has special meaning of transferring flags
+ emit->emitIns_R(INS_vmrs, EA_4BYTE, REG_R15);
}
else
{
@@ -522,12 +668,12 @@ void CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
bool op1Is64Bit = (varTypeIsLong(op1Type) || op1Type == TYP_REF);
bool op2Is64Bit = (varTypeIsLong(op2Type) || op2Type == TYP_REF);
NYI_IF(op1Is64Bit || op2Is64Bit, "Long compare");
- assert(!op1->isContainedMemoryOp() || op1Type == op2Type);
- assert(!op2->isContainedMemoryOp() || op1Type == op2Type);
+ assert(!op1->isUsedFromMemory() || op1Type == op2Type);
+ assert(!op2->isUsedFromMemory() || op1Type == op2Type);
cmpAttr = emitTypeSize(cmpType);
}
+ emit->emitInsBinary(ins, cmpAttr, op1, op2);
}
- emit->emitInsBinary(ins, cmpAttr, op1, op2);
// Are we evaluating this into a register?
if (targetReg != REG_NA)
@@ -579,7 +725,68 @@ void CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
case GT_STOREIND:
{
- NYI("GT_STOREIND");
+ GenTreeStoreInd* storeInd = treeNode->AsStoreInd();
+ GenTree* data = storeInd->Data();
+ GenTree* addr = storeInd->Addr();
+ var_types targetType = storeInd->TypeGet();
+
+ assert(!varTypeIsFloating(targetType) || (targetType == data->TypeGet()));
+
+ GCInfo::WriteBarrierForm writeBarrierForm = gcInfo.gcIsWriteBarrierCandidate(treeNode, data);
+ if (writeBarrierForm != GCInfo::WBF_NoBarrier)
+ {
+ // data and addr must be in registers.
+ // Consume both registers so that any copies of interfering
+ // registers are taken care of.
+ genConsumeOperands(storeInd->AsOp());
+
+#if NOGC_WRITE_BARRIERS
+ NYI_ARM("NOGC_WRITE_BARRIERS");
+#else
+ // At this point, we should not have any interference.
+ // That is, 'data' must not be in REG_ARG_0,
+ // as that is where 'addr' must go.
+ noway_assert(data->gtRegNum != REG_ARG_0);
+
+ // addr goes in REG_ARG_0
+ if (addr->gtRegNum != REG_ARG_0)
+ {
+ inst_RV_RV(INS_mov, REG_ARG_0, addr->gtRegNum, addr->TypeGet());
+ }
+
+ // data goes in REG_ARG_1
+ if (data->gtRegNum != REG_ARG_1)
+ {
+ inst_RV_RV(INS_mov, REG_ARG_1, data->gtRegNum, data->TypeGet());
+ }
+#endif // NOGC_WRITE_BARRIERS
+
+ genGCWriteBarrier(storeInd, writeBarrierForm);
+ }
+ else // A normal store, not a WriteBarrier store
+ {
+ bool reverseOps = ((storeInd->gtFlags & GTF_REVERSE_OPS) != 0);
+ bool dataIsUnary = false;
+
+ // We must consume the operands in the proper execution order,
+ // so that liveness is updated appropriately.
+ if (!reverseOps)
+ {
+ genConsumeAddress(addr);
+ }
+
+ if (!data->isContained())
+ {
+ genConsumeRegs(data);
+ }
+
+ if (reverseOps)
+ {
+ genConsumeAddress(addr);
+ }
+
+ emit->emitInsMov(ins_Store(data->TypeGet()), emitTypeSize(storeInd), storeInd);
+ }
}
break;
@@ -682,7 +889,14 @@ void CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
break;
case GT_NO_OP:
- NYI("GT_NO_OP");
+ if (treeNode->gtFlags & GTF_NO_OP_NO)
+ {
+ noway_assert(!"GTF_NO_OP_NO should not be set");
+ }
+ else
+ {
+ instGen(INS_nop);
+ }
break;
case GT_ARR_BOUNDS_CHECK:
@@ -733,13 +947,22 @@ void CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
emit->emitIns_R_L(INS_lea, EA_PTRSIZE, genPendingCallLabel, treeNode->gtRegNum);
break;
+ case GT_CLS_VAR_ADDR:
+ emit->emitIns_R_C(INS_lea, EA_PTRSIZE, targetReg, treeNode->gtClsVar.gtClsVarHnd, 0);
+ genProduceReg(treeNode);
+ break;
+
+ case GT_IL_OFFSET:
+ // Do nothing; these nodes are simply markers for debug info.
+ break;
+
default:
{
#ifdef DEBUG
char message[256];
_snprintf_s(message, _countof(message), _TRUNCATE, "NYI: Unimplemented node type %s\n",
GenTree::NodeName(treeNode->OperGet()));
- notYetImplemented(message, __FILE__, __LINE__);
+ NYIRAW(message);
#else
NYI("unimplemented node");
#endif
@@ -748,24 +971,33 @@ void CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
}
-// generate code for the locked operations:
-// GT_LOCKADD, GT_XCHG, GT_XADD
+//------------------------------------------------------------------------
+// genLockedInstructions: Generate code for the locked operations.
+//
+// Notes:
+// Handles GT_LOCKADD, GT_XCHG, GT_XADD nodes.
+//
void CodeGen::genLockedInstructions(GenTreeOp* treeNode)
{
NYI("genLockedInstructions");
}
-// generate code for GT_ARR_BOUNDS_CHECK node
+//------------------------------------------------------------------------
+// genRangeCheck: Generate code for a GT_ARR_BOUNDS_CHECK node.
+//
void CodeGen::genRangeCheck(GenTreePtr oper)
{
noway_assert(oper->OperGet() == GT_ARR_BOUNDS_CHECK);
GenTreeBoundsChk* bndsChk = oper->AsBoundsChk();
- GenTreePtr arrLen = bndsChk->gtArrLen->gtEffectiveVal();
GenTreePtr arrIdx = bndsChk->gtIndex->gtEffectiveVal();
+ GenTreePtr arrLen = bndsChk->gtArrLen->gtEffectiveVal();
GenTreePtr arrRef = NULL;
int lenOffset = 0;
+ genConsumeIfReg(arrIdx);
+ genConsumeIfReg(arrLen);
+
GenTree * src1, *src2;
emitJumpKind jmpKind;
@@ -784,15 +1016,13 @@ void CodeGen::genRangeCheck(GenTreePtr oper)
jmpKind = genJumpKindForOper(GT_GE, CK_UNSIGNED);
}
- genConsumeIfReg(src1);
- genConsumeIfReg(src2);
-
getEmitter()->emitInsBinary(INS_cmp, emitAttr(TYP_INT), src1, src2);
genJumpToThrowHlpBlk(jmpKind, SCK_RNGCHK_FAIL, bndsChk->gtIndRngFailBB);
}
-// make a temporary indir we can feed to pattern matching routines
-// in cases where we don't want to instantiate all the indirs that happen
+//------------------------------------------------------------------------
+// indirForm: Make a temporary indir we can feed to pattern matching routines
+// in cases where we don't want to instantiate all the indirs that happen.
//
GenTreeIndir CodeGen::indirForm(var_types type, GenTree* base)
{
@@ -804,8 +1034,9 @@ GenTreeIndir CodeGen::indirForm(var_types type, GenTree* base)
return i;
}
-// make a temporary int we can feed to pattern matching routines
-// in cases where we don't want to instantiate
+//------------------------------------------------------------------------
+// intForm: Make a temporary int we can feed to pattern matching routines
+// in cases where we don't want to instantiate.
//
GenTreeIntCon CodeGen::intForm(var_types type, ssize_t value)
{
@@ -817,6 +1048,9 @@ GenTreeIntCon CodeGen::intForm(var_types type, ssize_t value)
return i;
}
+//------------------------------------------------------------------------
+// genGetInsForOper: Return instruction encoding of the operation tree.
+//
instruction CodeGen::genGetInsForOper(genTreeOps oper, var_types type)
{
instruction ins;
@@ -835,6 +1069,9 @@ instruction CodeGen::genGetInsForOper(genTreeOps oper, var_types type)
case GT_MUL:
ins = INS_MUL;
break;
+ case GT_DIV:
+ ins = INS_sdiv;
+ break;
case GT_LSH:
ins = INS_SHIFT_LEFT_LOGICAL;
break;
@@ -878,21 +1115,331 @@ instruction CodeGen::genGetInsForOper(genTreeOps oper, var_types type)
//
void CodeGen::genCodeForShift(GenTreePtr tree)
{
- NYI("genCodeForShift");
+ var_types targetType = tree->TypeGet();
+ genTreeOps oper = tree->OperGet();
+ instruction ins = genGetInsForOper(oper, targetType);
+ emitAttr size = emitTypeSize(tree);
+
+ assert(tree->gtRegNum != REG_NA);
+
+ GenTreePtr operand = tree->gtGetOp1();
+ genConsumeReg(operand);
+
+ GenTreePtr shiftBy = tree->gtGetOp2();
+ if (!shiftBy->IsCnsIntOrI())
+ {
+ genConsumeReg(shiftBy);
+ getEmitter()->emitIns_R_R_R(ins, size, tree->gtRegNum, operand->gtRegNum, shiftBy->gtRegNum);
+ }
+ else
+ {
+ unsigned immWidth = size * BITS_PER_BYTE;
+ ssize_t shiftByImm = shiftBy->gtIntCon.gtIconVal & (immWidth - 1);
+
+ getEmitter()->emitIns_R_R_I(ins, size, tree->gtRegNum, operand->gtRegNum, shiftByImm);
+ }
+
+ genProduceReg(tree);
}
+//------------------------------------------------------------------------
+// genRegCopy: Generate a register copy.
+//
void CodeGen::genRegCopy(GenTree* treeNode)
{
NYI("genRegCopy");
}
-// Produce code for a GT_CALL node
+//------------------------------------------------------------------------
+// genCallInstruction: Produce code for a GT_CALL node
+//
void CodeGen::genCallInstruction(GenTreePtr node)
{
- NYI("Call not implemented");
+ GenTreeCall* call = node->AsCall();
+
+ assert(call->gtOper == GT_CALL);
+
+ gtCallTypes callType = (gtCallTypes)call->gtCallType;
+
+ IL_OFFSETX ilOffset = BAD_IL_OFFSET;
+
+ // all virtuals should have been expanded into a control expression
+ assert(!call->IsVirtual() || call->gtControlExpr || call->gtCallAddr);
+
+ // Consume all the arg regs
+ for (GenTreePtr list = call->gtCallLateArgs; list; list = list->MoveNext())
+ {
+ assert(list->OperIsList());
+
+ GenTreePtr argNode = list->Current();
+
+ fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, argNode->gtSkipReloadOrCopy());
+ assert(curArgTabEntry);
+
+ if (curArgTabEntry->regNum == REG_STK)
+ continue;
+
+ // Deal with multi register passed struct args.
+ if (argNode->OperGet() == GT_FIELD_LIST)
+ {
+ GenTreeArgList* argListPtr = argNode->AsArgList();
+ unsigned iterationNum = 0;
+ regNumber argReg = curArgTabEntry->regNum;
+ for (; argListPtr != nullptr; argListPtr = argListPtr->Rest(), iterationNum++)
+ {
+ GenTreePtr putArgRegNode = argListPtr->gtOp.gtOp1;
+ assert(putArgRegNode->gtOper == GT_PUTARG_REG);
+
+ genConsumeReg(putArgRegNode);
+
+ if (putArgRegNode->gtRegNum != argReg)
+ {
+ inst_RV_RV(ins_Move_Extend(putArgRegNode->TypeGet(), putArgRegNode->InReg()), argReg,
+ putArgRegNode->gtRegNum);
+ }
+
+ argReg = genRegArgNext(argReg);
+ }
+ }
+ else
+ {
+ regNumber argReg = curArgTabEntry->regNum;
+ genConsumeReg(argNode);
+ if (argNode->gtRegNum != argReg)
+ {
+ inst_RV_RV(ins_Move_Extend(argNode->TypeGet(), argNode->InReg()), argReg, argNode->gtRegNum);
+ }
+ }
+
+ // In the case of a varargs call,
+ // the ABI dictates that if we have floating point args,
+ // we must pass the enregistered arguments in both the
+ // integer and floating point registers so, let's do that.
+ if (call->IsVarargs() && varTypeIsFloating(argNode))
+ {
+ NYI_ARM("CodeGen - IsVarargs");
+ }
+ }
+
+ // Insert a null check on "this" pointer if asked.
+ if (call->NeedsNullCheck())
+ {
+ const regNumber regThis = genGetThisArgReg(call);
+ const regNumber tmpReg = genRegNumFromMask(node->gtRsvdRegs);
+ getEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, regThis, 0);
+ }
+
+ // Either gtControlExpr != null or gtCallAddr != null or it is a direct non-virtual call to a user or helper method.
+ CORINFO_METHOD_HANDLE methHnd;
+ GenTree* target = call->gtControlExpr;
+ if (callType == CT_INDIRECT)
+ {
+ assert(target == nullptr);
+ target = call->gtCall.gtCallAddr;
+ methHnd = nullptr;
+ }
+ else
+ {
+ methHnd = call->gtCallMethHnd;
+ }
+
+ CORINFO_SIG_INFO* sigInfo = nullptr;
+#ifdef DEBUG
+ // Pass the call signature information down into the emitter so the emitter can associate
+ // native call sites with the signatures they were generated from.
+ if (callType != CT_HELPER)
+ {
+ sigInfo = call->callSig;
+ }
+#endif // DEBUG
+
+ // If fast tail call, then we are done.
+ if (call->IsFastTailCall())
+ {
+ NYI_ARM("fast tail call");
+ }
+
+ // For a pinvoke to unmanaged code we emit a label to clear
+ // the GC pointer state before the callsite.
+ // We can't utilize the typical lazy killing of GC pointers
+ // at (or inside) the callsite.
+ if (call->IsUnmanaged())
+ {
+ genDefineTempLabel(genCreateTempLabel());
+ }
+
+ // Determine return value size(s).
+ ReturnTypeDesc* pRetTypeDesc = call->GetReturnTypeDesc();
+ emitAttr retSize = EA_PTRSIZE;
+
+ if (call->HasMultiRegRetVal())
+ {
+ NYI_ARM("has multi reg ret val");
+ }
+ else
+ {
+ assert(!varTypeIsStruct(call));
+
+ if (call->gtType == TYP_REF || call->gtType == TYP_ARRAY)
+ {
+ retSize = EA_GCREF;
+ }
+ else if (call->gtType == TYP_BYREF)
+ {
+ retSize = EA_BYREF;
+ }
+ }
+
+ // We need to propagate the IL offset information to the call instruction, so we can emit
+ // an IL to native mapping record for the call, to support managed return value debugging.
+ // We don't want tail call helper calls that were converted from normal calls to get a record,
+ // so we skip this hash table lookup logic in that case.
+ if (compiler->opts.compDbgInfo && compiler->genCallSite2ILOffsetMap != nullptr && !call->IsTailCall())
+ {
+ (void)compiler->genCallSite2ILOffsetMap->Lookup(call, &ilOffset);
+ }
+
+ if (target != nullptr)
+ {
+ // For ARM a call target can not be a contained indirection
+ assert(!target->isContainedIndir());
+
+ // We have already generated code for gtControlExpr evaluating it into a register.
+ // We just need to emit "call reg" in this case.
+ //
+ assert(genIsValidIntReg(target->gtRegNum));
+
+ genEmitCall(emitter::EC_INDIR_R, methHnd,
+ INDEBUG_LDISASM_COMMA(sigInfo) nullptr, // addr
+ retSize, ilOffset, target->gtRegNum);
+ }
+ else
+ {
+ // Generate a direct call to a non-virtual user defined or helper method
+ assert(callType == CT_HELPER || callType == CT_USER_FUNC);
+
+ void* addr = nullptr;
+ if (callType == CT_HELPER)
+ {
+ // Direct call to a helper method.
+ CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(methHnd);
+ noway_assert(helperNum != CORINFO_HELP_UNDEF);
+
+ void* pAddr = nullptr;
+ addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr);
+
+ if (addr == nullptr)
+ {
+ addr = pAddr;
+ }
+ }
+ else
+ {
+ // Direct call to a non-virtual user function.
+ CORINFO_ACCESS_FLAGS aflags = CORINFO_ACCESS_ANY;
+ if (call->IsSameThis())
+ {
+ aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_THIS);
+ }
+
+ if ((call->NeedsNullCheck()) == 0)
+ {
+ aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_NONNULL);
+ }
+
+ CORINFO_CONST_LOOKUP addrInfo;
+ compiler->info.compCompHnd->getFunctionEntryPoint(methHnd, &addrInfo, aflags);
+
+ addr = addrInfo.addr;
+ }
+
+ assert(addr);
+ // Non-virtual direct call to known addresses
+ if (!arm_Valid_Imm_For_BL((ssize_t)addr))
+ {
+ regNumber tmpReg = genRegNumFromMask(node->gtRsvdRegs);
+ instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, tmpReg, (ssize_t)addr);
+ genEmitCall(emitter::EC_INDIR_R, methHnd, INDEBUG_LDISASM_COMMA(sigInfo) NULL, retSize, ilOffset, tmpReg);
+ }
+ else
+ {
+ genEmitCall(emitter::EC_FUNC_TOKEN, methHnd, INDEBUG_LDISASM_COMMA(sigInfo) addr, retSize, ilOffset);
+ }
+ }
+
+ // if it was a pinvoke we may have needed to get the address of a label
+ if (genPendingCallLabel)
+ {
+ assert(call->IsUnmanaged());
+ genDefineTempLabel(genPendingCallLabel);
+ genPendingCallLabel = nullptr;
+ }
+
+ // Update GC info:
+ // All Callee arg registers are trashed and no longer contain any GC pointers.
+ // TODO-ARM-Bug?: As a matter of fact shouldn't we be killing all of callee trashed regs here?
+ // For now we will assert that other than arg regs gc ref/byref set doesn't contain any other
+ // registers from RBM_CALLEE_TRASH
+ assert((gcInfo.gcRegGCrefSetCur & (RBM_CALLEE_TRASH & ~RBM_ARG_REGS)) == 0);
+ assert((gcInfo.gcRegByrefSetCur & (RBM_CALLEE_TRASH & ~RBM_ARG_REGS)) == 0);
+ gcInfo.gcRegGCrefSetCur &= ~RBM_ARG_REGS;
+ gcInfo.gcRegByrefSetCur &= ~RBM_ARG_REGS;
+
+ var_types returnType = call->TypeGet();
+ if (returnType != TYP_VOID)
+ {
+ regNumber returnReg;
+
+ if (call->HasMultiRegRetVal())
+ {
+ assert(pRetTypeDesc != nullptr);
+ unsigned regCount = pRetTypeDesc->GetReturnRegCount();
+
+ // If regs allocated to call node are different from ABI return
+ // regs in which the call has returned its result, move the result
+ // to regs allocated to call node.
+ for (unsigned i = 0; i < regCount; ++i)
+ {
+ var_types regType = pRetTypeDesc->GetReturnRegType(i);
+ returnReg = pRetTypeDesc->GetABIReturnReg(i);
+ regNumber allocatedReg = call->GetRegNumByIdx(i);
+ if (returnReg != allocatedReg)
+ {
+ inst_RV_RV(ins_Copy(regType), allocatedReg, returnReg, regType);
+ }
+ }
+ }
+ else
+ {
+ if (varTypeIsFloating(returnType))
+ {
+ returnReg = REG_FLOATRET;
+ }
+ else
+ {
+ returnReg = REG_INTRET;
+ }
+
+ if (call->gtRegNum != returnReg)
+ {
+ inst_RV_RV(ins_Copy(returnType), call->gtRegNum, returnReg, returnType);
+ }
+ }
+
+ genProduceReg(call);
+ }
+
+ // If there is nothing next, that means the result is thrown away, so this value is not live.
+ // However, for minopts or debuggable code, we keep it live to support managed return value debugging.
+ if ((call->gtNext == nullptr) && !compiler->opts.MinOpts() && !compiler->opts.compDbgCode)
+ {
+ gcInfo.gcMarkRegSetNpt(RBM_INTRET);
+ }
}
-// produce code for a GT_LEA subnode
+//------------------------------------------------------------------------
+// genLeaInstruction: Produce code for a GT_LEA subnode.
+//
void CodeGen::genLeaInstruction(GenTreeAddrMode* lea)
{
if (lea->Base() && lea->Index())
@@ -909,12 +1456,44 @@ void CodeGen::genLeaInstruction(GenTreeAddrMode* lea)
genProduceReg(lea);
}
-// Generate code to materialize a condition into a register
-// (the condition codes must already have been appropriately set)
-
+//------------------------------------------------------------------------
+// genSetRegToCond: Generate code to materialize a condition into a register.
+//
+// Arguments:
+// dstReg - The target register to set to 1 or 0
+// tree - The GenTree Relop node that was used to set the Condition codes
+//
+// Return Value: none
+//
+// Preconditions:
+// The condition codes must already have been appropriately set.
+//
void CodeGen::genSetRegToCond(regNumber dstReg, GenTreePtr tree)
{
- NYI("genSetRegToCond");
+ // Emit code like this:
+ // ...
+ // bgt True
+ // movs rD, #0
+ // b Next
+ // True:
+ // movs rD, #1
+ // Next:
+ // ...
+
+ CompareKind compareKind = ((tree->gtFlags & GTF_UNSIGNED) != 0) ? CK_UNSIGNED : CK_SIGNED;
+ emitJumpKind jmpKind = genJumpKindForOper(tree->gtOper, compareKind);
+
+ BasicBlock* labelTrue = genCreateTempLabel();
+ getEmitter()->emitIns_J(emitter::emitJumpKindToIns(jmpKind), labelTrue);
+
+ getEmitter()->emitIns_R_I(INS_mov, emitActualTypeSize(tree->gtType), dstReg, 0);
+
+ BasicBlock* labelNext = genCreateTempLabel();
+ getEmitter()->emitIns_J(INS_b, labelNext);
+
+ genDefineTempLabel(labelTrue);
+ getEmitter()->emitIns_R_I(INS_mov, emitActualTypeSize(tree->gtType), dstReg, 1);
+ genDefineTempLabel(labelNext);
}
//------------------------------------------------------------------------
@@ -933,7 +1512,85 @@ void CodeGen::genSetRegToCond(regNumber dstReg, GenTreePtr tree)
//
void CodeGen::genIntToIntCast(GenTreePtr treeNode)
{
- NYI("Cast");
+ assert(treeNode->OperGet() == GT_CAST);
+
+ GenTreePtr castOp = treeNode->gtCast.CastOp();
+ emitter* emit = getEmitter();
+
+ var_types dstType = treeNode->CastToType();
+ var_types srcType = genActualType(castOp->TypeGet());
+ emitAttr movSize = emitActualTypeSize(dstType);
+ bool movRequired = false;
+
+ regNumber targetReg = treeNode->gtRegNum;
+ regNumber sourceReg = castOp->gtRegNum;
+
+ // For Long to Int conversion we will have a reserved integer register to hold the immediate mask
+ regNumber tmpReg = (treeNode->gtRsvdRegs == RBM_NONE) ? REG_NA : genRegNumFromMask(treeNode->gtRsvdRegs);
+
+ assert(genIsValidIntReg(targetReg));
+ assert(genIsValidIntReg(sourceReg));
+
+ instruction ins = INS_invalid;
+
+ genConsumeReg(castOp);
+ Lowering::CastInfo castInfo;
+
+ // Get information about the cast.
+ Lowering::getCastDescription(treeNode, &castInfo);
+
+ if (castInfo.requiresOverflowCheck)
+ {
+ NYI_ARM("CodeGen::genIntToIntCast for OverflowCheck");
+ }
+ else // Non-overflow checking cast.
+ {
+ if (genTypeSize(srcType) == genTypeSize(dstType))
+ {
+ ins = INS_mov;
+ }
+ else
+ {
+ var_types extendType = TYP_UNKNOWN;
+
+ // If we need to treat a signed type as unsigned
+ if ((treeNode->gtFlags & GTF_UNSIGNED) != 0)
+ {
+ extendType = genUnsignedType(srcType);
+ movSize = emitTypeSize(extendType);
+ movRequired = true;
+ }
+ else
+ {
+ if (genTypeSize(srcType) < genTypeSize(dstType))
+ {
+ extendType = srcType;
+ movSize = emitTypeSize(srcType);
+ if (srcType == TYP_UINT)
+ {
+ movRequired = true;
+ }
+ }
+ else // (genTypeSize(srcType) > genTypeSize(dstType))
+ {
+ extendType = dstType;
+ movSize = emitTypeSize(dstType);
+ }
+ }
+
+ ins = ins_Move_Extend(extendType, castOp->InReg());
+ }
+ }
+
+ // We should never be generating a load from memory instruction here!
+ assert(!emit->emitInsIsLoad(ins));
+
+ if ((ins != INS_mov) || movRequired || (targetReg != sourceReg))
+ {
+ emit->emitIns_R_R(ins, movSize, targetReg, sourceReg);
+ }
+
+ genProduceReg(treeNode);
}
//------------------------------------------------------------------------
@@ -952,7 +1609,39 @@ void CodeGen::genIntToIntCast(GenTreePtr treeNode)
//
void CodeGen::genFloatToFloatCast(GenTreePtr treeNode)
{
- NYI("Cast");
+ // float <--> double conversions are always non-overflow ones
+ assert(treeNode->OperGet() == GT_CAST);
+ assert(!treeNode->gtOverflow());
+
+ regNumber targetReg = treeNode->gtRegNum;
+ assert(genIsValidFloatReg(targetReg));
+
+ GenTreePtr op1 = treeNode->gtOp.gtOp1;
+ assert(!op1->isContained()); // Cannot be contained
+ assert(genIsValidFloatReg(op1->gtRegNum)); // Must be a valid float reg.
+
+ var_types dstType = treeNode->CastToType();
+ var_types srcType = op1->TypeGet();
+ assert(varTypeIsFloating(srcType) && varTypeIsFloating(dstType));
+
+ genConsumeOperands(treeNode->AsOp());
+
+ // treeNode must be a reg
+ assert(!treeNode->isContained());
+
+ if (srcType != dstType)
+ {
+ instruction insVcvt = (srcType == TYP_FLOAT) ? INS_vcvt_f2d // convert Float to Double
+ : INS_vcvt_d2f; // convert Double to Float
+
+ getEmitter()->emitIns_R_R(insVcvt, emitTypeSize(treeNode), treeNode->gtRegNum, op1->gtRegNum);
+ }
+ else if (treeNode->gtRegNum != op1->gtRegNum)
+ {
+ getEmitter()->emitIns_R_R(INS_vmov, emitTypeSize(treeNode), treeNode->gtRegNum, op1->gtRegNum);
+ }
+
+ genProduceReg(treeNode);
}
//------------------------------------------------------------------------
@@ -971,7 +1660,69 @@ void CodeGen::genFloatToFloatCast(GenTreePtr treeNode)
//
void CodeGen::genIntToFloatCast(GenTreePtr treeNode)
{
- NYI("Cast");
+ // int --> float/double conversions are always non-overflow ones
+ assert(treeNode->OperGet() == GT_CAST);
+ assert(!treeNode->gtOverflow());
+
+ regNumber targetReg = treeNode->gtRegNum;
+ assert(genIsValidFloatReg(targetReg));
+
+ GenTreePtr op1 = treeNode->gtOp.gtOp1;
+ assert(!op1->isContained()); // Cannot be contained
+ assert(genIsValidIntReg(op1->gtRegNum)); // Must be a valid int reg.
+
+ var_types dstType = treeNode->CastToType();
+ var_types srcType = op1->TypeGet();
+ assert(!varTypeIsFloating(srcType) && varTypeIsFloating(dstType));
+
+ // force the srcType to unsigned if GT_UNSIGNED flag is set
+ if (treeNode->gtFlags & GTF_UNSIGNED)
+ {
+ srcType = genUnsignedType(srcType);
+ }
+
+ // We should never see a srcType whose size is neither EA_4BYTE or EA_8BYTE
+ // For conversions from small types (byte/sbyte/int16/uint16) to float/double,
+ // we expect the front-end or lowering phase to have generated two levels of cast.
+ //
+ emitAttr srcSize = EA_ATTR(genTypeSize(srcType));
+ noway_assert((srcSize == EA_4BYTE) || (srcSize == EA_8BYTE));
+
+ instruction insVcvt = INS_invalid;
+
+ if (dstType == TYP_DOUBLE)
+ {
+ if (srcSize == EA_4BYTE)
+ {
+ insVcvt = (varTypeIsUnsigned(srcType)) ? INS_vcvt_u2d : INS_vcvt_i2d;
+ }
+ else
+ {
+ assert(srcSize == EA_8BYTE);
+ NYI_ARM("Casting int64/uint64 to double in genIntToFloatCast");
+ }
+ }
+ else
+ {
+ assert(dstType == TYP_FLOAT);
+ if (srcSize == EA_4BYTE)
+ {
+ insVcvt = (varTypeIsUnsigned(srcType)) ? INS_vcvt_u2f : INS_vcvt_i2f;
+ }
+ else
+ {
+ assert(srcSize == EA_8BYTE);
+ NYI_ARM("Casting int64/uint64 to float in genIntToFloatCast");
+ }
+ }
+
+ genConsumeOperands(treeNode->AsOp());
+
+ assert(insVcvt != INS_invalid);
+ getEmitter()->emitIns_R_R(INS_vmov_i2f, srcSize, treeNode->gtRegNum, op1->gtRegNum);
+ getEmitter()->emitIns_R_R(insVcvt, srcSize, treeNode->gtRegNum, treeNode->gtRegNum);
+
+ genProduceReg(treeNode);
}
//------------------------------------------------------------------------
@@ -990,31 +1741,72 @@ void CodeGen::genIntToFloatCast(GenTreePtr treeNode)
//
void CodeGen::genFloatToIntCast(GenTreePtr treeNode)
{
- NYI("Cast");
-}
+ // we don't expect to see overflow detecting float/double --> int type conversions here
+ // as they should have been converted into helper calls by front-end.
+ assert(treeNode->OperGet() == GT_CAST);
+ assert(!treeNode->gtOverflow());
-/*****************************************************************************
- *
- * Create and record GC Info for the function.
- */
-#ifdef JIT32_GCENCODER
-void*
-#else
-void
-#endif
-CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr))
-{
-#ifdef JIT32_GCENCODER
- return genCreateAndStoreGCInfoJIT32(codeSize, prologSize, epilogSize DEBUGARG(codePtr));
-#else
- genCreateAndStoreGCInfoX64(codeSize, prologSize DEBUGARG(codePtr));
-#endif
-}
+ regNumber targetReg = treeNode->gtRegNum;
+ assert(genIsValidIntReg(targetReg)); // Must be a valid int reg.
+
+ GenTreePtr op1 = treeNode->gtOp.gtOp1;
+ assert(!op1->isContained()); // Cannot be contained
+ assert(genIsValidFloatReg(op1->gtRegNum)); // Must be a valid float reg.
+
+ var_types dstType = treeNode->CastToType();
+ var_types srcType = op1->TypeGet();
+ assert(varTypeIsFloating(srcType) && !varTypeIsFloating(dstType));
+
+ // We should never see a dstType whose size is neither EA_4BYTE or EA_8BYTE
+ // For conversions to small types (byte/sbyte/int16/uint16) from float/double,
+ // we expect the front-end or lowering phase to have generated two levels of cast.
+ //
+ emitAttr dstSize = EA_ATTR(genTypeSize(dstType));
+ noway_assert((dstSize == EA_4BYTE) || (dstSize == EA_8BYTE));
+
+ instruction insVcvt = INS_invalid;
+
+ if (srcType == TYP_DOUBLE)
+ {
+ if (dstSize == EA_4BYTE)
+ {
+ insVcvt = (varTypeIsUnsigned(dstType)) ? INS_vcvt_d2u : INS_vcvt_d2i;
+ }
+ else
+ {
+ assert(dstSize == EA_8BYTE);
+ NYI_ARM("Casting double to int64/uint64 in genIntToFloatCast");
+ }
+ }
+ else
+ {
+ assert(srcType == TYP_FLOAT);
+ if (dstSize == EA_4BYTE)
+ {
+ insVcvt = (varTypeIsUnsigned(dstType)) ? INS_vcvt_f2u : INS_vcvt_f2i;
+ }
+ else
+ {
+ assert(dstSize == EA_8BYTE);
+ NYI_ARM("Casting float to int64/uint64 in genIntToFloatCast");
+ }
+ }
-// TODO-ARM-Cleanup: It seems that the ARM JIT (classic and otherwise) uses this method, so it seems to be
-// inappropriately named?
+ genConsumeOperands(treeNode->AsOp());
+
+ assert(insVcvt != INS_invalid);
+ getEmitter()->emitIns_R_R(insVcvt, dstSize, op1->gtRegNum, op1->gtRegNum);
+ getEmitter()->emitIns_R_R(INS_vmov_f2i, dstSize, treeNode->gtRegNum, op1->gtRegNum);
+
+ genProduceReg(treeNode);
+}
-void CodeGen::genCreateAndStoreGCInfoX64(unsigned codeSize, unsigned prologSize DEBUGARG(void* codePtr))
+//------------------------------------------------------------------------
+// genCreateAndStoreGCInfo: Create and record GC Info for the function.
+//
+void CodeGen::genCreateAndStoreGCInfo(unsigned codeSize,
+ unsigned prologSize,
+ unsigned epilogSize DEBUGARG(void* codePtr))
{
IAllocator* allowZeroAlloc = new (compiler, CMK_GC) AllowZeroAllocator(compiler->getAllocatorGC());
GcInfoEncoder* gcInfoEncoder = new (compiler, CMK_GC)
@@ -1039,20 +1831,73 @@ void CodeGen::genCreateAndStoreGCInfoX64(unsigned codeSize, unsigned prologSize
compiler->compInfoBlkSize = 0; // not exposed by the GCEncoder interface
}
-/*****************************************************************************
- * Emit a call to a helper function.
- */
-
-void CodeGen::genEmitHelperCall(unsigned helper,
- int argSize,
- emitAttr retSize
-#ifndef LEGACY_BACKEND
- ,
- regNumber callTargetReg /*= REG_NA */
-#endif // !LEGACY_BACKEND
- )
+//------------------------------------------------------------------------
+// genEmitHelperCall: Emit a call to a helper function.
+//
+void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, regNumber callTargetReg /*= REG_NA */)
{
- NYI("Helper call");
+ // Can we call the helper function directly
+
+ void *addr = NULL, **pAddr = NULL;
+
+#if defined(DEBUG) && defined(PROFILING_SUPPORTED)
+ // Don't ask VM if it hasn't requested ELT hooks
+ if (!compiler->compProfilerHookNeeded && compiler->opts.compJitELTHookEnabled &&
+ (helper == CORINFO_HELP_PROF_FCN_ENTER || helper == CORINFO_HELP_PROF_FCN_LEAVE ||
+ helper == CORINFO_HELP_PROF_FCN_TAILCALL))
+ {
+ addr = compiler->compProfilerMethHnd;
+ }
+ else
+#endif
+ {
+ addr = compiler->compGetHelperFtn((CorInfoHelpFunc)helper, (void**)&pAddr);
+ }
+
+ if (!addr || !arm_Valid_Imm_For_BL((ssize_t)addr))
+ {
+ if (callTargetReg == REG_NA)
+ {
+ // If a callTargetReg has not been explicitly provided, we will use REG_DEFAULT_HELPER_CALL_TARGET, but
+ // this is only a valid assumption if the helper call is known to kill REG_DEFAULT_HELPER_CALL_TARGET.
+ callTargetReg = REG_DEFAULT_HELPER_CALL_TARGET;
+ }
+
+ // Load the address into a register and call through a register
+ if (addr)
+ {
+ instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, callTargetReg, (ssize_t)addr);
+ }
+ else
+ {
+ getEmitter()->emitIns_R_AI(INS_ldr, EA_PTR_DSP_RELOC, callTargetReg, (ssize_t)pAddr);
+ regTracker.rsTrackRegTrash(callTargetReg);
+ }
+
+ getEmitter()->emitIns_Call(emitter::EC_INDIR_R, compiler->eeFindHelper(helper),
+ INDEBUG_LDISASM_COMMA(nullptr) NULL, // addr
+ argSize, retSize, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
+ gcInfo.gcRegByrefSetCur,
+ BAD_IL_OFFSET, // ilOffset
+ callTargetReg, // ireg
+ REG_NA, 0, 0, // xreg, xmul, disp
+ false, // isJump
+ emitter::emitNoGChelper(helper),
+ (CorInfoHelpFunc)helper == CORINFO_HELP_PROF_FCN_LEAVE);
+ }
+ else
+ {
+ getEmitter()->emitIns_Call(emitter::EC_FUNC_TOKEN, compiler->eeFindHelper(helper),
+ INDEBUG_LDISASM_COMMA(nullptr) addr, argSize, retSize, gcInfo.gcVarPtrSetCur,
+ gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur, BAD_IL_OFFSET, REG_NA, REG_NA, 0,
+ 0, /* ilOffset, ireg, xreg, xmul, disp */
+ false, /* isJump */
+ emitter::emitNoGChelper(helper),
+ (CorInfoHelpFunc)helper == CORINFO_HELP_PROF_FCN_LEAVE);
+ }
+
+ regTracker.rsTrashRegSet(RBM_CALLEE_TRASH);
+ regTracker.rsTrashRegsForGCInterruptability();
}
#endif // _TARGET_ARM_