summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authormikedn <onemihaid@hotmail.com>2018-06-07 08:18:59 +0300
committerBruce Forstall <brucefo@microsoft.com>2018-06-06 22:18:59 -0700
commit4cdb8103d1d3dbc68577f22c73b1d8112e9706bc (patch)
tree934cbd89a007404529de947c42fe442ce768ff49 /src
parentf7993ddfc19ccce311df69b32f675bd076935e7f (diff)
downloadcoreclr-4cdb8103d1d3dbc68577f22c73b1d8112e9706bc.tar.gz
coreclr-4cdb8103d1d3dbc68577f22c73b1d8112e9706bc.tar.bz2
coreclr-4cdb8103d1d3dbc68577f22c73b1d8112e9706bc.zip
Fix ARM cast codegen (#18063)
* Fix ARM cast codegen ARM cast codegen is rather convoluted and sometimes does the wrong thing by applying GTF_UNSIGNED to the destination type even though this flag is only about the source type. * Add more conversion tests These tests are intended to support casts with contained operands. They're also useful to catch issues caused by load nodes having small types and generally improve test coverage for casts.
Diffstat (limited to 'src')
-rw-r--r--src/jit/codegenarm.cpp2
-rw-r--r--src/jit/codegenarmarch.cpp115
-rw-r--r--src/jit/emitarm.cpp8
3 files changed, 57 insertions, 68 deletions
diff --git a/src/jit/codegenarm.cpp b/src/jit/codegenarm.cpp
index a378545dc4..67a609ca90 100644
--- a/src/jit/codegenarm.cpp
+++ b/src/jit/codegenarm.cpp
@@ -107,7 +107,7 @@ void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, regNumber reg, ssize_t imm,
if (getEmitter()->isLowRegister(reg) && (imm_hi16 == 0xffff) && ((imm_lo16 & 0x8000) == 0x8000))
{
- getEmitter()->emitIns_R_R(INS_sxth, EA_2BYTE, reg, reg);
+ getEmitter()->emitIns_R_R(INS_sxth, EA_4BYTE, reg, reg);
}
else
{
diff --git a/src/jit/codegenarmarch.cpp b/src/jit/codegenarmarch.cpp
index e7209a8b93..17bb104ed6 100644
--- a/src/jit/codegenarmarch.cpp
+++ b/src/jit/codegenarmarch.cpp
@@ -2891,10 +2891,8 @@ void CodeGen::genIntToIntCast(GenTree* treeNode)
GenTree* castOp = treeNode->gtCast.CastOp();
emitter* emit = getEmitter();
- var_types dstType = treeNode->CastToType();
- var_types srcType = genActualType(castOp->TypeGet());
- emitAttr movSize = emitActualTypeSize(dstType);
- bool movRequired = false;
+ var_types dstType = treeNode->CastToType();
+ var_types srcType = genActualType(castOp->TypeGet());
assert(genTypeSize(srcType) <= genTypeSize(TYP_I_IMPL));
@@ -2907,8 +2905,6 @@ void CodeGen::genIntToIntCast(GenTree* treeNode)
assert(genIsValidIntReg(targetReg));
assert(genIsValidIntReg(sourceReg));
- instruction ins = INS_invalid;
-
genConsumeReg(castOp);
Lowering::CastInfo castInfo;
@@ -2917,7 +2913,9 @@ void CodeGen::genIntToIntCast(GenTree* treeNode)
if (castInfo.requiresOverflowCheck)
{
- emitAttr cmpSize = EA_ATTR(genTypeSize(srcType));
+ bool movRequired = (sourceReg != targetReg);
+ emitAttr movSize = emitActualTypeSize(dstType);
+ emitAttr cmpSize = EA_ATTR(genTypeSize(srcType));
if (castInfo.signCheckOnly)
{
@@ -3007,72 +3005,63 @@ void CodeGen::genIntToIntCast(GenTree* treeNode)
emitJumpKind jmpLT = genJumpKindForOper(GT_LT, CK_SIGNED);
genJumpToThrowHlpBlk(jmpLT, SCK_OVERFLOW);
}
- ins = INS_mov;
+
+ if (movRequired)
+ {
+ emit->emitIns_R_R(INS_mov, movSize, targetReg, sourceReg);
+ }
}
else // Non-overflow checking cast.
{
- if (genTypeSize(srcType) == genTypeSize(dstType))
+ const unsigned srcSize = genTypeSize(srcType);
+ const unsigned dstSize = genTypeSize(dstType);
+ instruction ins;
+ emitAttr insSize;
+
+ if (dstSize < 4)
{
- ins = INS_mov;
+ // Casting to a small type really means widening from that small type to INT/LONG.
+ ins = ins_Move_Extend(dstType, true);
+ insSize = emitActualTypeSize(treeNode->TypeGet());
}
- else
+#ifdef _TARGET_64BIT_
+ // dstType cannot be a long type on 32 bit targets, such casts should have been decomposed.
+ // srcType cannot be a small type since it's the "actual type" of the cast operand.
+ // This means that widening casts do not occur on 32 bit targets.
+ else if (dstSize > srcSize)
{
- var_types extendType = TYP_UNKNOWN;
-
- if (genTypeSize(srcType) < genTypeSize(dstType))
- {
- // If we need to treat a signed type as unsigned
- if ((treeNode->gtFlags & GTF_UNSIGNED) != 0)
- {
- extendType = genUnsignedType(srcType);
- }
- else
- extendType = srcType;
-#ifdef _TARGET_ARM_
- movSize = emitTypeSize(extendType);
-#endif // _TARGET_ARM_
- if (extendType == TYP_UINT)
- {
-#ifdef _TARGET_ARM64_
- // If we are casting from a smaller type to
- // a larger type, then we need to make sure the
- // higher 4 bytes are zero to guarantee the correct value.
- // Therefore using a mov with EA_4BYTE in place of EA_8BYTE
- // will zero the upper bits
- movSize = EA_4BYTE;
-#endif // _TARGET_ARM64_
- movRequired = true;
- }
- }
- else // (genTypeSize(srcType) > genTypeSize(dstType))
- {
- // If we need to treat a signed type as unsigned
- if ((treeNode->gtFlags & GTF_UNSIGNED) != 0)
- {
- extendType = genUnsignedType(dstType);
- }
- else
- extendType = dstType;
-#if defined(_TARGET_ARM_)
- movSize = emitTypeSize(extendType);
-#elif defined(_TARGET_ARM64_)
- if (extendType == TYP_INT)
- {
- movSize = EA_8BYTE; // a sxtw instruction requires EA_8BYTE
- }
-#endif // _TARGET_*
- }
+ // (U)INT to (U)LONG widening cast
+ assert((srcSize == 4) && (dstSize == 8));
+ // Make sure the node type has the same size as the destination type.
+ assert(genTypeSize(treeNode->TypeGet()) == dstSize);
- ins = ins_Move_Extend(extendType, true);
+ ins = treeNode->IsUnsigned() ? INS_mov : INS_sxtw;
+ // SXTW requires EA_8BYTE but MOV requires EA_4BYTE in order to zero out the upper 32 bits.
+ insSize = (ins == INS_sxtw) ? EA_8BYTE : EA_4BYTE;
}
- }
+#endif
+ else
+ {
+ // Sign changing cast or narrowing cast
+ assert(dstSize <= srcSize);
+ // Note that narrowing casts are possible only on 64 bit targets.
+ assert(srcSize <= genTypeSize(TYP_I_IMPL));
+ // Make sure the node type has the same size as the destination type.
+ assert(genTypeSize(treeNode->TypeGet()) == dstSize);
- // We should never be generating a load from memory instruction here!
- assert(!emit->emitInsIsLoad(ins));
+ // This cast basically does nothing, even when narrowing it is the job of the
+ // consumer of this node to use the appropriate register size (32 or 64 bit)
+ // and not rely on the cast to set the upper 32 bits in a certain manner.
+ // Still, we will need to generate a MOV instruction if the source and target
+ // registers are different.
+ ins = (sourceReg != targetReg) ? INS_mov : INS_none;
+ insSize = EA_SIZE(dstSize);
+ }
- if ((ins != INS_mov) || movRequired || (targetReg != sourceReg))
- {
- emit->emitIns_R_R(ins, movSize, targetReg, sourceReg);
+ if (ins != INS_none)
+ {
+ emit->emitIns_R_R(ins, insSize, targetReg, sourceReg);
+ }
}
genProduceReg(treeNode);
diff --git a/src/jit/emitarm.cpp b/src/jit/emitarm.cpp
index 89de34aa0d..81381fe25f 100644
--- a/src/jit/emitarm.cpp
+++ b/src/jit/emitarm.cpp
@@ -2199,12 +2199,12 @@ void emitter::emitIns_R_R(
case INS_sxtb:
case INS_uxtb:
- assert(size == EA_1BYTE);
+ assert(size == EA_4BYTE);
goto EXTEND_COMMON;
case INS_sxth:
case INS_uxth:
- assert(size == EA_2BYTE);
+ assert(size == EA_4BYTE);
EXTEND_COMMON:
assert(insDoesNotSetFlags(flags));
if (isLowRegister(reg1) && isLowRegister(reg2))
@@ -2638,12 +2638,12 @@ void emitter::emitIns_R_R_I(instruction ins,
case INS_sxtb:
case INS_uxtb:
- assert(size == EA_1BYTE);
+ assert(size == EA_4BYTE);
goto EXTEND_COMMON;
case INS_sxth:
case INS_uxth:
- assert(size == EA_2BYTE);
+ assert(size == EA_4BYTE);
EXTEND_COMMON:
assert(insOptsNone(opt));
assert(insDoesNotSetFlags(flags));