author    Carol Eidt <carol.eidt@microsoft.com>  2016-03-25 18:22:46 -0700
committer Carol Eidt <carol.eidt@microsoft.com>  2016-03-25 18:22:46 -0700
commit    9d8513352e858ad3b739626e062f0133698a7111 (patch)
tree      751b15b93e1c3c8f17918e22c4182cabf6adeb84
parent    24cab6db08b1c7f0c297abefa7fa27bd88082505 (diff)
parent    3789f42252a2faf577f90841fb944256c637b4f2 (diff)
Merge pull request #3920 from CarolEidt/ChangeLdObjToObj
1stClassStructs: Replace GT_LDOBJ with GT_OBJ
-rw-r--r--  src/jit/codegenarm64.cpp        31
-rw-r--r--  src/jit/codegencommon.cpp        8
-rw-r--r--  src/jit/codegenlegacy.cpp      106
-rw-r--r--  src/jit/codegenlinear.h          3
-rw-r--r--  src/jit/codegenxarch.cpp        14
-rw-r--r--  src/jit/compiler.cpp            15
-rw-r--r--  src/jit/compiler.h              61
-rw-r--r--  src/jit/flowgraph.cpp           68
-rwxr-xr-x  src/jit/gentree.cpp             69
-rw-r--r--  src/jit/gentree.h               40
-rw-r--r--  src/jit/gschecks.cpp             2
-rw-r--r--  src/jit/gtlist.h                 2
-rw-r--r--  src/jit/gtstructs.h              2
-rw-r--r--  src/jit/importer.cpp           141
-rw-r--r--  src/jit/liveness.cpp             6
-rw-r--r--  src/jit/lower.cpp                8
-rw-r--r--  src/jit/lowerarm64.cpp           6
-rw-r--r--  src/jit/lowerxarch.cpp          16
-rw-r--r--  src/jit/morph.cpp              167
-rw-r--r--  src/jit/rationalize.cpp         46
-rw-r--r--  src/jit/rationalize.h            2
-rw-r--r--  src/jit/regalloc.cpp            30
-rw-r--r--  src/jit/simd.cpp                 9
-rw-r--r--  src/jit/simdcodegenxarch.cpp    20
-rw-r--r--  src/jit/stackfp.cpp             93
-rw-r--r--  src/jit/valuenum.cpp             2
26 files changed, 434 insertions, 533 deletions
diff --git a/src/jit/codegenarm64.cpp b/src/jit/codegenarm64.cpp
index 47efd1bdbf..6297582834 100644
--- a/src/jit/codegenarm64.cpp
+++ b/src/jit/codegenarm64.cpp
@@ -2512,7 +2512,6 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
// this will set both the Z and V flags only when dividendReg is MinInt
//
emit->emitIns_R_R_R(INS_adds, size, REG_ZR, dividendReg, dividendReg);
-
inst_JMP(jmpNotEqual, sdivLabel); // goto sdiv if the Z flag is clear
genJumpToThrowHlpBlk(EJ_vs, SCK_ARITH_EXCPN); // if the V flags is set throw ArithmeticException
@@ -2850,8 +2849,8 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
genProduceReg(treeNode);
break;
- case GT_LDOBJ:
- genCodeForLdObj(treeNode->AsOp());
+ case GT_OBJ:
+ genCodeForObj(treeNode->AsObj());
break;
case GT_MULHI:
@@ -6537,36 +6536,36 @@ CodeGen::genIntrinsic(GenTreePtr treeNode)
}
//---------------------------------------------------------------------
-// genCodeForLdObj - generate code for a GT_LDOBJ node
+// genCodeForObj - generate code for a GT_OBJ node
//
// Arguments
-// treeNode - the GT_LDOBJ node
+// treeNode - the GT_OBJ node
//
// Return value:
// None
//
-void CodeGen::genCodeForLdObj(GenTreeOp* treeNode)
+void CodeGen::genCodeForObj(GenTreeObj* objNode)
{
- assert(treeNode->OperGet() == GT_LDOBJ);
+ assert(objNode->OperGet() == GT_OBJ);
- GenTree* addr = treeNode->gtOp.gtOp1;
+ GenTree* addr = objNode->gtOp.gtOp1;
genConsumeAddress(addr);
regNumber addrReg = addr->gtRegNum;
- regNumber targetReg = treeNode->gtRegNum;
- var_types targetType = treeNode->TypeGet();
+ regNumber targetReg = objNode->gtRegNum;
+ var_types targetType = objNode->TypeGet();
emitter * emit = getEmitter();
- noway_assert(targetType == TYP_STRUCT);
+ noway_assert(varTypeIsStruct(targetType));
noway_assert(targetReg != REG_NA);
- CORINFO_CLASS_HANDLE ldObjClass = treeNode->gtLdObj.gtClass;
- int structSize = compiler->info.compCompHnd->getClassSize(ldObjClass);
+ CORINFO_CLASS_HANDLE objClass = objNode->gtObj.gtClass;
+ int structSize = compiler->info.compCompHnd->getClassSize(objClass);
assert(structSize <= 2*TARGET_POINTER_SIZE);
BYTE gcPtrs[2] = {TYPE_GC_NONE, TYPE_GC_NONE};
- compiler->info.compCompHnd->getClassGClayout(ldObjClass, &gcPtrs[0]);
+ compiler->info.compCompHnd->getClassGClayout(objClass, &gcPtrs[0]);
var_types type0 = compiler->getJitGCType(gcPtrs[0]);
var_types type1 = compiler->getJitGCType(gcPtrs[1]);
@@ -6603,7 +6602,7 @@ void CodeGen::genCodeForLdObj(GenTreeOp* treeNode)
// ldp x2, x3, [x0]
//
if (remainingSize == 2*TARGET_POINTER_SIZE)
- {
+ {
if (hasGCpointers)
{
// We have GC pointers use two ldr instructions
@@ -6691,7 +6690,7 @@ void CodeGen::genCodeForLdObj(GenTreeOp* treeNode)
noway_assert(targetReg != addrReg);
getEmitter()->emitIns_R_R_I(INS_ldr, deferAttr, targetReg, addrReg, deferOffset);
}
- genProduceReg(treeNode);
+ genProduceReg(objNode);
}
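
For context, the renamed genCodeForObj above loads a struct of at most two pointer slots into registers and picks its load sequence from the struct's GC layout: with GC pointers it issues two single ldr instructions (presumably so each destination register can carry its own GC reporting), otherwise a single ldp pair load. A minimal standalone sketch of that choice, using simplified stand-in types rather than the JIT's actual emitter interface:

#include <cstdio>

// Stand-in for the JIT's per-slot GC classification (illustration only).
enum GcSlotType { SLOT_NONE, SLOT_REF, SLOT_BYREF };

// Sketch of the decision for a struct occupying exactly two pointer slots.
void emitTwoSlotStructLoad(GcSlotType slot0, GcSlotType slot1)
{
    bool hasGCpointers = (slot0 != SLOT_NONE) || (slot1 != SLOT_NONE);
    if (hasGCpointers)
    {
        // Two separate loads, one per slot.
        std::printf("ldr x0, [x2]\n");
        std::printf("ldr x1, [x2, #8]\n");
    }
    else
    {
        // No GC pointers: a paired load is enough.
        std::printf("ldp x0, x1, [x2]\n");
    }
}

int main()
{
    emitTwoSlotStructLoad(SLOT_REF, SLOT_NONE);   // struct containing a GC reference
    emitTwoSlotStructLoad(SLOT_NONE, SLOT_NONE);  // struct with no GC pointers
}
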
diff --git a/src/jit/codegencommon.cpp b/src/jit/codegencommon.cpp
index fbd96eb02b..6eb5e0a522 100644
--- a/src/jit/codegencommon.cpp
+++ b/src/jit/codegencommon.cpp
@@ -842,9 +842,9 @@ void Compiler::compUpdateLifeVar(GenTreePtr tree, VARSET_TP* pLastUseVars)
#if !defined(_TARGET_AMD64_) // no addr nodes on AMD and experimenting with with encountering vars in 'random' order
if (ForCodeGen)
{
- if (tree->gtOper == GT_LDOBJ)
+ if (tree->gtOper == GT_OBJ)
{
- // The tree must have the particular form LDOBJ(ADDR(LCL)); no need to do the check below.
+ // The tree must have the particular form OBJ(ADDR(LCL)); no need to do the check below.
assert(indirAddrLocal != NULL);
}
else if (tree->OperIsIndir())
@@ -1739,9 +1739,9 @@ unsigned CodeGenInterface::InferStructOpSizeAlign(GenTreePtr op, unsigned *align
op = op->gtOp.gtOp2;
}
- if (op->gtOper == GT_LDOBJ)
+ if (op->gtOper == GT_OBJ)
{
- CORINFO_CLASS_HANDLE clsHnd = op->gtLdObj.gtClass;
+ CORINFO_CLASS_HANDLE clsHnd = op->AsObj()->gtClass;
opSize = compiler->info.compCompHnd->getClassSize(clsHnd);
alignment = roundUp(compiler->info.compCompHnd->getClassAlignmentRequirement(clsHnd), TARGET_POINTER_SIZE);
}
diff --git a/src/jit/codegenlegacy.cpp b/src/jit/codegenlegacy.cpp
index 846f51df05..8b9f95937d 100644
--- a/src/jit/codegenlegacy.cpp
+++ b/src/jit/codegenlegacy.cpp
@@ -10551,13 +10551,9 @@ LockBinOpCommon:
return;
}
- case GT_LDOBJ:
- // We need to decide whether all GT_LDOBJ nodes should be eliminated in importation/morphing (by
- // translation into copyblk or ldind), or whether it is legal to have as arguments to this
- // method, where we have to generate code for them.
- // On AMD64, at least, we can get here...
- NYI("Handle GT_LDOBJ, or eliminate them earlier.");
- unreached();
+ case GT_OBJ:
+ // All GT_OBJ nodes must have been morphed prior to this.
+ noway_assert(!"Should not see a GT_OBJ node during CodeGen.");
default:
#ifdef DEBUG
@@ -16533,7 +16529,7 @@ size_t CodeGen::genPushArgList(GenTreePtr call)
arg = arg->gtOp.gtOp2;
}
- noway_assert(arg->gtOper == GT_LDOBJ
+ noway_assert(arg->gtOper == GT_OBJ
|| arg->gtOper == GT_MKREFANY
|| arg->gtOper == GT_IND);
noway_assert((arg->gtFlags & GTF_REVERSE_OPS) == 0);
@@ -16571,12 +16567,12 @@ size_t CodeGen::genPushArgList(GenTreePtr call)
}
else
{
- noway_assert(arg->gtOper == GT_LDOBJ);
+ noway_assert(arg->gtOper == GT_OBJ);
- if (arg->gtLdObj.gtOp1->gtOper == GT_ADDR &&
- arg->gtLdObj.gtOp1->gtOp.gtOp1->gtOper == GT_LCL_VAR)
+ if (arg->gtObj.gtOp1->gtOper == GT_ADDR &&
+ arg->gtObj.gtOp1->gtOp.gtOp1->gtOper == GT_LCL_VAR)
{
- GenTreePtr structLocalTree = arg->gtLdObj.gtOp1->gtOp.gtOp1;
+ GenTreePtr structLocalTree = arg->gtObj.gtOp1->gtOp.gtOp1;
unsigned structLclNum = structLocalTree->gtLclVarCommon.gtLclNum;
LclVarDsc * varDsc = &compiler->lvaTable[structLclNum];
@@ -16598,7 +16594,7 @@ size_t CodeGen::genPushArgList(GenTreePtr call)
addrReg = 0;
// Get the number of BYTES to copy to the stack
- opsz = roundUp(compiler->info.compCompHnd->getClassSize(arg->gtLdObj.gtClass), sizeof(void*));
+ opsz = roundUp(compiler->info.compCompHnd->getClassSize(arg->gtObj.gtClass), sizeof(void*));
size_t bytesToBeCopied = opsz;
// postponedFields is true if we have any postponed fields
@@ -16618,6 +16614,9 @@ size_t CodeGen::genPushArgList(GenTreePtr call)
regMaskTP postponedRegKind = RBM_NONE;
size_t expectedAlignedOffset = UINT_MAX;
+ VARSET_TP* deadVarBits = NULL;
+ compiler->GetPromotedStructDeathVars()->Lookup(structLocalTree, &deadVarBits);
+
// Reverse loop, starts pushing from the end of the struct (i.e. the highest field offset)
//
for (int varNum = varDsc->lvFieldLclStart + varDsc->lvFieldCnt - 1;
@@ -16741,10 +16740,19 @@ size_t CodeGen::genPushArgList(GenTreePtr call)
genSinglePush();
}
- GenTreePtr fieldTree = arg->gtLdObj.gtFldTreeList[(varNum - varDsc->lvFieldLclStart)];
- noway_assert(fieldTree->gtOper == GT_REG_VAR);
#if FEATURE_STACK_FP_X87
-
+ GenTree* fieldTree = new (compiler, GT_REG_VAR) GenTreeLclVar(fieldVarDsc->lvType, varNum, BAD_IL_OFFSET);
+ fieldTree->gtOper = GT_REG_VAR;
+ fieldTree->gtRegNum = fieldVarDsc->lvRegNum;
+ fieldTree->gtRegVar.gtRegNum = fieldVarDsc->lvRegNum;
+ if ((arg->gtFlags & GTF_VAR_DEATH) != 0)
+ {
+ if (fieldVarDsc->lvTracked &&
+ (deadVarBits == NULL || VarSetOps::IsMember(compiler, *deadVarBits, fieldVarDsc->lvVarIndex)))
+ {
+ fieldTree->gtFlags |= GTF_VAR_DEATH;
+ }
+ }
genCodeForTreeStackFP_Leaf(fieldTree);
// Take reg to top of stack
@@ -16913,28 +16921,18 @@ size_t CodeGen::genPushArgList(GenTreePtr call)
break;
}
- // If we have field tree nodes, we need to update liveset for them
- GenTreePtr * fldTreeList = arg->gtLdObj.gtFldTreeList;
- if (fldTreeList != NULL)
- {
- unsigned fieldCount = compiler->lvaTable[structLocalTree->gtLclVarCommon.gtLclNum].lvFieldCnt;
- for (unsigned i = 0; i < fieldCount; i++)
- {
- if (fldTreeList[i] != NULL) genUpdateLife(fldTreeList[i]);
- }
- }
}
- genCodeForTree(arg->gtLdObj.gtOp1, 0);
- noway_assert(arg->gtLdObj.gtOp1->gtFlags & GTF_REG_VAL);
- regNumber reg = arg->gtLdObj.gtOp1->gtRegNum;
+ genCodeForTree(arg->gtObj.gtOp1, 0);
+ noway_assert(arg->gtObj.gtOp1->gtFlags & GTF_REG_VAL);
+ regNumber reg = arg->gtObj.gtOp1->gtRegNum;
// Get the number of DWORDS to copy to the stack
- opsz = roundUp(compiler->info.compCompHnd->getClassSize(arg->gtLdObj.gtClass), sizeof(void*));
+ opsz = roundUp(compiler->info.compCompHnd->getClassSize(arg->gtObj.gtClass), sizeof(void*));
unsigned slots = (unsigned)(opsz / sizeof(void*));
BYTE* gcLayout = new (compiler, CMK_Codegen) BYTE[slots];
- compiler->info.compCompHnd->getClassGClayout(arg->gtLdObj.gtClass, gcLayout);
+ compiler->info.compCompHnd->getClassGClayout(arg->gtObj.gtClass, gcLayout);
BOOL bNoneGC = TRUE;
for (int i = slots - 1; i >= 0; --i)
@@ -17385,14 +17383,14 @@ DEFERRED:
genUpdateLife(op1);
arg = arg->gtOp.gtOp2;
}
- noway_assert((arg->OperGet() == GT_LDOBJ) || (arg->OperGet() == GT_MKREFANY));
+ noway_assert((arg->OperGet() == GT_OBJ) || (arg->OperGet() == GT_MKREFANY));
CORINFO_CLASS_HANDLE clsHnd;
unsigned argAlign;
unsigned slots;
BYTE* gcLayout = NULL;
- // If the struct being passed is a LDOBJ of a local struct variable that is promoted (in the
+ // If the struct being passed is a OBJ of a local struct variable that is promoted (in the
// INDEPENDENT fashion, which doesn't require writes to be written through to the variable's
// home stack loc) "promotedStructLocalVarDesc" will be set to point to the local variable
// table entry for the promoted struct local. As we fill slots with the contents of a
@@ -17406,9 +17404,9 @@ DEFERRED:
unsigned promotedStructOffsetOfFirstStackSlot = 0;
unsigned argOffsetOfFirstStackSlot = UINT32_MAX; // Indicates uninitialized.
- if (arg->OperGet() == GT_LDOBJ)
+ if (arg->OperGet() == GT_OBJ)
{
- clsHnd = arg->gtLdObj.gtClass;
+ clsHnd = arg->gtObj.gtClass;
unsigned originalSize = compiler->info.compCompHnd->getClassSize(clsHnd);
argAlign = roundUp(compiler->info.compCompHnd->getClassAlignmentRequirement(clsHnd), TARGET_POINTER_SIZE);
argSize = (unsigned)(roundUp(originalSize, TARGET_POINTER_SIZE));
@@ -17420,10 +17418,10 @@ DEFERRED:
compiler->info.compCompHnd->getClassGClayout(clsHnd, gcLayout);
// Are we loading a promoted struct local var?
- if (arg->gtLdObj.gtOp1->gtOper == GT_ADDR &&
- arg->gtLdObj.gtOp1->gtOp.gtOp1->gtOper == GT_LCL_VAR)
+ if (arg->gtObj.gtOp1->gtOper == GT_ADDR &&
+ arg->gtObj.gtOp1->gtOp.gtOp1->gtOper == GT_LCL_VAR)
{
- structLocalTree = arg->gtLdObj.gtOp1->gtOp.gtOp1;
+ structLocalTree = arg->gtObj.gtOp1->gtOp.gtOp1;
unsigned structLclNum = structLocalTree->gtLclVarCommon.gtLclNum;
LclVarDsc * varDsc = &compiler->lvaTable[structLclNum];
@@ -17461,17 +17459,17 @@ DEFERRED:
// This code passes a TYP_STRUCT by value using the outgoing arg space var
//
- if (arg->OperGet() == GT_LDOBJ)
+ if (arg->OperGet() == GT_OBJ)
{
regNumber regSrc = REG_STK;
- regNumber regTmp = REG_STK; // This will get set below if the ldobj is not of a promoted struct local.
+ regNumber regTmp = REG_STK; // This will get set below if the obj is not of a promoted struct local.
int cStackSlots = 0;
if (promotedStructLocalVarDesc == NULL)
{
- genComputeReg(arg->gtLdObj.gtOp1, 0, RegSet::ANY_REG, RegSet::KEEP_REG);
- noway_assert(arg->gtLdObj.gtOp1->gtFlags & GTF_REG_VAL);
- regSrc = arg->gtLdObj.gtOp1->gtRegNum;
+ genComputeReg(arg->gtObj.gtOp1, 0, RegSet::ANY_REG, RegSet::KEEP_REG);
+ noway_assert(arg->gtObj.gtOp1->gtFlags & GTF_REG_VAL);
+ regSrc = arg->gtObj.gtOp1->gtRegNum;
}
// The number of bytes to add "argOffset" to get the arg offset of the current slot.
@@ -18259,7 +18257,7 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
genUpdateLife(op1);
arg = arg->gtOp.gtOp2;
}
- noway_assert((arg->OperGet() == GT_LDOBJ) || (arg->OperGet() == GT_LCL_VAR) || (arg->OperGet() == GT_MKREFANY));
+ noway_assert((arg->OperGet() == GT_OBJ) || (arg->OperGet() == GT_LCL_VAR) || (arg->OperGet() == GT_MKREFANY));
// This code passes a TYP_STRUCT by value using
// the argument registers first and
@@ -18322,7 +18320,7 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
// that go dead after this use of the variable in the argument list.
regMaskTP deadFieldVarRegs = RBM_NONE;
- // If the struct being passed is a LDOBJ of a local struct variable that is promoted (in the
+ // If the struct being passed is an OBJ of a local struct variable that is promoted (in the
// INDEPENDENT fashion, which doesn't require writes to be written through to the variables
// home stack loc) "promotedStructLocalVarDesc" will be set to point to the local variable
// table entry for the promoted struct local. As we fill slots with the contents of a
@@ -18338,13 +18336,13 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
BYTE * gcLayout = NULL;
regNumber regSrc = REG_NA;
- if (arg->gtOper == GT_LDOBJ)
+ if (arg->gtOper == GT_OBJ)
{
// Are we loading a promoted struct local var?
- if (arg->gtLdObj.gtOp1->gtOper == GT_ADDR &&
- arg->gtLdObj.gtOp1->gtOp.gtOp1->gtOper == GT_LCL_VAR)
+ if (arg->gtObj.gtOp1->gtOper == GT_ADDR &&
+ arg->gtObj.gtOp1->gtOp.gtOp1->gtOper == GT_LCL_VAR)
{
- structLocalTree = arg->gtLdObj.gtOp1->gtOp.gtOp1;
+ structLocalTree = arg->gtObj.gtOp1->gtOp.gtOp1;
unsigned structLclNum = structLocalTree->gtLclVarCommon.gtLclNum;
LclVarDsc * varDsc = &compiler->lvaTable[structLclNum];
@@ -18366,16 +18364,16 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
{
// If it's not a promoted struct variable, set "regSrc" to the address
// of the struct local.
- genComputeReg(arg->gtLdObj.gtOp1, regNeedMask, RegSet::EXACT_REG, RegSet::KEEP_REG);
- noway_assert(arg->gtLdObj.gtOp1->gtFlags & GTF_REG_VAL);
- regSrc = arg->gtLdObj.gtOp1->gtRegNum;
+ genComputeReg(arg->gtObj.gtOp1, regNeedMask, RegSet::EXACT_REG, RegSet::KEEP_REG);
+ noway_assert(arg->gtObj.gtOp1->gtFlags & GTF_REG_VAL);
+ regSrc = arg->gtObj.gtOp1->gtRegNum;
// Remove this register from the set of registers that we pick from, unless slots equals 1
if (slots > 1)
regNeedMask &= ~genRegMask(regSrc);
}
gcLayout = new (compiler, CMK_Codegen) BYTE[slots];
- compiler->info.compCompHnd->getClassGClayout(arg->gtLdObj.gtClass, gcLayout);
+ compiler->info.compCompHnd->getClassGClayout(arg->gtObj.gtClass, gcLayout);
}
else if (arg->gtOper == GT_LCL_VAR)
{
@@ -18612,7 +18610,7 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
} while (needOverwriteRegSrc != overwriteRegSrc);
}
- if ((arg->gtOper == GT_LDOBJ )&& (promotedStructLocalVarDesc == NULL))
+ if ((arg->gtOper == GT_OBJ) && (promotedStructLocalVarDesc == NULL))
{
regSet.rsMarkRegFree(genRegMask(regSrc));
}
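
The legacy-backend changes above repeatedly test whether a struct argument has the shape OBJ(ADDR(LCL_VAR)) before taking the promoted-struct path. As a rough standalone sketch (the node type here is a simplified stand-in, not the real GenTree layout), the check amounts to:

enum Oper { OP_OBJ, OP_ADDR, OP_LCL_VAR, OP_OTHER };

struct Node
{
    Oper  oper;
    Node* op1;   // single child used in this sketch
};

// Returns the LCL_VAR node when 'arg' has the shape OBJ(ADDR(LCL_VAR)), else nullptr.
// This mirrors the test the legacy codegen performs before using the promoted fields directly.
Node* isObjOfLocal(Node* arg)
{
    if (arg != nullptr && arg->oper == OP_OBJ &&
        arg->op1 != nullptr && arg->op1->oper == OP_ADDR &&
        arg->op1->op1 != nullptr && arg->op1->op1->oper == OP_LCL_VAR)
    {
        return arg->op1->op1;
    }
    return nullptr;
}
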
diff --git a/src/jit/codegenlinear.h b/src/jit/codegenlinear.h
index 08a7da24c8..5e34f05245 100644
--- a/src/jit/codegenlinear.h
+++ b/src/jit/codegenlinear.h
@@ -54,7 +54,7 @@
#endif
#ifdef _TARGET_ARM64_
- void genCodeForLdObj(GenTreeOp* treeNode);
+ void genCodeForObj(GenTreeObj* treeNode);
#endif
#ifdef FEATURE_SIMD
@@ -67,6 +67,7 @@
instruction getOpForSIMDIntrinsic(SIMDIntrinsicID intrinsicId, var_types baseType, unsigned *ival = nullptr);
void genSIMDScalarMove(var_types type, regNumber target, regNumber src, SIMDScalarMoveType moveType);
+ void genSIMDZero(var_types targetType, var_types baseType, regNumber targetReg);
void genSIMDIntrinsicInit(GenTreeSIMD* simdNode);
void genSIMDIntrinsicInitN(GenTreeSIMD* simdNode);
void genSIMDIntrinsicInitArray(GenTreeSIMD* simdNode);
diff --git a/src/jit/codegenxarch.cpp b/src/jit/codegenxarch.cpp
index 3dc7d2d80b..0258c1fabc 100644
--- a/src/jit/codegenxarch.cpp
+++ b/src/jit/codegenxarch.cpp
@@ -3608,7 +3608,7 @@ void CodeGen::genStructPutArgUnroll(GenTreePutArgStk* putArgNode, unsigned baseV
assert(src->isContained());
- assert(src->gtOper == GT_LDOBJ);
+ assert(src->gtOper == GT_OBJ);
if (!src->gtOp.gtOp1->isContained())
{
@@ -3628,7 +3628,7 @@ void CodeGen::genStructPutArgUnroll(GenTreePutArgStk* putArgNode, unsigned baseV
size_t slots = size / XMM_REGSIZE_BYTES;
assert(putArgNode->gtGetOp1()->isContained());
- assert(putArgNode->gtGetOp1()->gtOp.gtOper == GT_LDOBJ);
+ assert(putArgNode->gtGetOp1()->gtOp.gtOper == GT_OBJ);
// TODO: In the below code the load and store instructions are for 16 bytes, but the
// type is EA_8BYTE. The movdqa/u are 16 byte instructions, so it works, but
@@ -3636,7 +3636,7 @@ void CodeGen::genStructPutArgUnroll(GenTreePutArgStk* putArgNode, unsigned baseV
while (slots-- > 0)
{
// Load
- genCodeForLoadOffset(INS_movdqu, EA_8BYTE, xmmReg, src->gtGetOp1(), offset); // Load the address of the child of the LdObj node.
+ genCodeForLoadOffset(INS_movdqu, EA_8BYTE, xmmReg, src->gtGetOp1(), offset); // Load the address of the child of the Obj node.
// Store
emit->emitIns_S_R(INS_movdqu,
@@ -5012,7 +5012,7 @@ void CodeGen::genConsumePutStructArgStk(GenTreePutArgStk* putArgNode, regNumber
// Get the GT_ADDR node, which is GT_LCL_VAR_ADDR (asserted below.)
GenTree* src = putArgNode->gtGetOp1();
- assert((src->gtOper == GT_LDOBJ) || ((src->gtOper == GT_IND && varTypeIsSIMD(src))));
+ assert((src->gtOper == GT_OBJ) || ((src->gtOper == GT_IND && varTypeIsSIMD(src))));
src = src->gtGetOp1();
size_t size = putArgNode->getArgSize();
@@ -5650,8 +5650,8 @@ void CodeGen::genCallInstruction(GenTreePtr node)
{
assert(arg->OperGet() == GT_PUTARG_STK);
- GenTreeLdObj* ldObj = arg->gtGetOp1()->AsLdObj();
- stackArgBytes = compiler->info.compCompHnd->getClassSize(ldObj->gtClass);
+ GenTreeObj* obj = arg->gtGetOp1()->AsObj();
+ stackArgBytes = compiler->info.compCompHnd->getClassSize(obj->gtClass);
}
else
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
@@ -8203,7 +8203,7 @@ CodeGen::genPutStructArgStk(GenTreePtr treeNode, unsigned baseVarNum)
genConsumePutStructArgStk(putArgStk, REG_RDI, REG_RSI, REG_NA, baseVarNum);
GenTreePtr dstAddr = putArgStk;
GenTreePtr src = putArgStk->gtOp.gtOp1;
- assert(src->OperGet() == GT_LDOBJ);
+ assert(src->OperGet() == GT_OBJ);
GenTreePtr srcAddr = src->gtGetOp1();
unsigned slots = putArgStk->gtNumSlots;
diff --git a/src/jit/compiler.cpp b/src/jit/compiler.cpp
index 4274a78837..bf66100187 100644
--- a/src/jit/compiler.cpp
+++ b/src/jit/compiler.cpp
@@ -444,16 +444,16 @@ void Compiler::getStructGcPtrsFromOp(GenTreePtr op, BYTE *gcPtrsOut)
assert(op->TypeGet() == TYP_STRUCT);
#ifdef _TARGET_ARM64_
- if (op->OperGet() == GT_LDOBJ)
+ if (op->OperGet() == GT_OBJ)
{
- CORINFO_CLASS_HANDLE ldObjClass = op->gtLdObj.gtClass;
+ CORINFO_CLASS_HANDLE objClass = op->gtObj.gtClass;
- int structSize = info.compCompHnd->getClassSize(ldObjClass);
+ int structSize = info.compCompHnd->getClassSize(objClass);
assert(structSize <= 2*TARGET_POINTER_SIZE);
BYTE gcPtrsTmp[2] = {TYPE_GC_NONE, TYPE_GC_NONE};
- info.compCompHnd->getClassGClayout(ldObjClass, &gcPtrsTmp[0]);
+ info.compCompHnd->getClassGClayout(objClass, &gcPtrsTmp[0]);
gcPtrsOut[0] = gcPtrsTmp[0];
gcPtrsOut[1] = gcPtrsTmp[1];
@@ -1164,7 +1164,7 @@ void Compiler::compDisplayStaticSizes(FILE* fout)
fprintf(fout, "Size of GenTreeStoreInd = %3u\n", sizeof(GenTreeStoreInd));
fprintf(fout, "Size of GenTreeRetExpr = %3u\n", sizeof(GenTreeRetExpr));
fprintf(fout, "Size of GenTreeStmt = %3u\n", sizeof(GenTreeStmt));
- fprintf(fout, "Size of GenTreeLdObj = %3u\n", sizeof(GenTreeLdObj));
+ fprintf(fout, "Size of GenTreeObj = %3u\n", sizeof(GenTreeObj));
fprintf(fout, "Size of GenTreeClsVar = %3u\n", sizeof(GenTreeClsVar));
fprintf(fout, "Size of GenTreeArgPlace = %3u\n", sizeof(GenTreeArgPlace));
fprintf(fout, "Size of GenTreeLabel = %3u\n", sizeof(GenTreeLabel));
@@ -2515,7 +2515,6 @@ void Compiler::compInitOptions(CORJIT_FLAGS* jitFlags)
if (compIsForInlining() || compIsForImportOnly())
return;
-
// The rest of the opts fields that we initialize here
// should only be used when we generate code for the method
// They should not be used when importing or inlining
@@ -4954,7 +4953,7 @@ int Compiler::compCompileHelper (CORINFO_MODULE_HANDLE clas
prejitResult.SetReported();
}
}
- else
+ else
{
// We are jitting the root method, or inlining.
fgFindBasicBlocks();
@@ -4964,7 +4963,7 @@ int Compiler::compCompileHelper (CORINFO_MODULE_HANDLE clas
if (compDonotInline())
{
goto _Next;
- }
+ }
compSetOptimizationLevel();
diff --git a/src/jit/compiler.h b/src/jit/compiler.h
index abbb611753..5203222da0 100644
--- a/src/jit/compiler.h
+++ b/src/jit/compiler.h
@@ -1767,7 +1767,7 @@ protected:
GenTreePtr src, GenTreePtr size,
bool volatil);
public:
- GenTreeLdObj* gtNewLdObjNode (CORINFO_CLASS_HANDLE structHnd, GenTreePtr addr);
+ GenTreeObj* gtNewObjNode (CORINFO_CLASS_HANDLE structHnd, GenTreePtr addr);
GenTreeBlkOp* gtNewCpObjNode (GenTreePtr dst, GenTreePtr src,
CORINFO_CLASS_HANDLE structHnd, bool volatil);
@@ -6676,10 +6676,10 @@ public :
regMaskTP* pArgSkippedRegMask);
#endif // _TARGET_ARM_
- // If "tree" is a indirection (GT_IND, or GT_LDOBJ) whose arg is an ADDR, whose arg is a LCL_VAR, return that LCL_VAR node, else NULL.
+ // If "tree" is a indirection (GT_IND, or GT_OBJ) whose arg is an ADDR, whose arg is a LCL_VAR, return that LCL_VAR node, else NULL.
GenTreePtr fgIsIndirOfAddrOfLocal(GenTreePtr tree);
- // This is indexed by GT_LDOBJ nodes that are address of promoted struct variables, which
+ // This is indexed by GT_OBJ nodes that are address of promoted struct variables, which
// have been annotated with the GTF_VAR_DEATH flag. If such a node is *not* mapped in this
// table, one may assume that all the (tracked) field vars die at this point. Otherwise,
// the node maps to a pointer to a VARSET_TP, containing set bits for each of the tracked field
@@ -6872,25 +6872,48 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
CORINFO_CLASS_HANDLE SIMDVectorHandle;
// Get the handle for a SIMD type.
- // For the purposes of type handles, we treat all Vector<T> as Vector<float> in the JIT,
- // as the actual instantiation type doesn't impact this code (that is always captured,
- // where semantically meaningful, in the "baseType" of SIMD nodes or lclVars.
- CORINFO_CLASS_HANDLE getStructHandleForSIMDType(var_types type)
- {
- noway_assert(varTypeIsSIMD(type));
- CORINFO_CLASS_HANDLE structHnd;
- switch (type)
+ CORINFO_CLASS_HANDLE gtGetStructHandleForSIMD(var_types simdType, var_types simdBaseType)
+ {
+ if (simdBaseType == TYP_FLOAT)
{
- case TYP_SIMD8: structHnd = SIMDVector2Handle; break;
- case TYP_SIMD12: structHnd = SIMDVector3Handle; break;
- case TYP_SIMD16: structHnd = SIMDVector4Handle; break;
-#ifdef FEATURE_AVX_SUPPORT
- case TYP_SIMD32: structHnd = SIMDFloatHandle; break;
-#endif // FEATURE_AVX_SUPPORT
- default: unreached();
+ switch(simdType)
+ {
+ case TYP_SIMD8:
+ return SIMDVector2Handle;
+ case TYP_SIMD12:
+ return SIMDVector3Handle;
+ case TYP_SIMD16:
+ if ((getSIMDVectorType() == TYP_SIMD32) ||
+ (SIMDVector4Handle != NO_CLASS_HANDLE))
+ {
+ return SIMDVector4Handle;
+ }
+ break;
+ case TYP_SIMD32:
+ break;
+ default:
+ unreached();
+ }
}
- return structHnd;
+ assert(simdType == getSIMDVectorType());
+ switch(simdBaseType)
+ {
+ case TYP_FLOAT: return SIMDFloatHandle;
+ case TYP_DOUBLE: return SIMDDoubleHandle;
+ case TYP_INT: return SIMDIntHandle;
+ case TYP_CHAR: return SIMDUShortHandle;
+ case TYP_USHORT: return SIMDUShortHandle;
+ case TYP_UBYTE: return SIMDUByteHandle;
+ case TYP_SHORT: return SIMDShortHandle;
+ case TYP_BYTE: return SIMDByteHandle;
+ case TYP_LONG: return SIMDLongHandle;
+ case TYP_UINT: return SIMDUIntHandle;
+ case TYP_ULONG: return SIMDULongHandle;
+ default: assert(!"Didn't find a class handle for simdType");
+ }
+ return NO_CLASS_HANDLE;
}
+
// SIMD Methods
CORINFO_METHOD_HANDLE SIMDVectorFloat_set_Item;
CORINFO_METHOD_HANDLE SIMDVectorFloat_get_Length;
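
The compiler.h hunk replaces getStructHandleForSIMDType, which treated every Vector<T> as Vector<float>, with gtGetStructHandleForSIMD, which keys the lookup on both the SIMD type and the element ("base") type. A condensed sketch of that dispatch, with strings standing in for the cached CORINFO_CLASS_HANDLE fields purely for illustration:

#include <string>

enum VarType { TYP_SIMD8, TYP_SIMD12, TYP_SIMD16, TYP_SIMD32, TYP_FLOAT, TYP_DOUBLE, TYP_INT };

// Illustration only: the real method returns handles cached on the Compiler instance.
std::string getStructHandleForSIMD(VarType simdType, VarType baseType, VarType vectorTSize)
{
    if (baseType == TYP_FLOAT)
    {
        // The fixed-size vector types (Vector2/3/4) only exist with a float element type.
        switch (simdType)
        {
        case TYP_SIMD8:  return "Vector2";
        case TYP_SIMD12: return "Vector3";
        case TYP_SIMD16:
            if (vectorTSize != TYP_SIMD16) return "Vector4"; // TYP_SIMD16 is not Vector<T> here
            break;                                           // otherwise fall through to Vector<T>
        default:
            break;
        }
    }
    // Otherwise this must be the target's Vector<T> size; pick the handle by element type.
    switch (baseType)
    {
    case TYP_FLOAT:  return "Vector<float>";
    case TYP_DOUBLE: return "Vector<double>";
    case TYP_INT:    return "Vector<int>";
    default:         return "<no handle>";
    }
}
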
diff --git a/src/jit/flowgraph.cpp b/src/jit/flowgraph.cpp
index b3da7646db..0aca16048e 100644
--- a/src/jit/flowgraph.cpp
+++ b/src/jit/flowgraph.cpp
@@ -6592,7 +6592,7 @@ bool Compiler::fgIsCommaThrow(GenTreePtr tree,
GenTreePtr Compiler::fgIsIndirOfAddrOfLocal(GenTreePtr tree)
{
GenTreePtr res = nullptr;
- if (tree->OperGet() == GT_LDOBJ || tree->OperIsIndir())
+ if (tree->OperGet() == GT_OBJ || tree->OperIsIndir())
{
GenTreePtr addr = tree->gtOp.gtOp1;
@@ -17713,25 +17713,6 @@ void Compiler::fgSetTreeSeqHelper(GenTreePtr tree)
return;
}
- /* Handle the case of an LDOBJ with a field list */
-
- GenTreePtr lclVarTree;
- if ((oper == GT_LDOBJ) &&
- tree->gtLdObj.gtFldTreeList != NULL &&
- impIsAddressInLocal(tree->gtOp.gtOp1, &lclVarTree))
- {
- GenTreePtr* fldTreeList = tree->gtLdObj.gtFldTreeList;
- unsigned fieldCount = lvaTable[lclVarTree->gtLclVarCommon.gtLclNum].lvFieldCnt;
-
- for (unsigned i = 0; i < fieldCount; i++)
- {
- if (fldTreeList[i] != NULL)
- {
- fgSetTreeSeqHelper(fldTreeList[i]);
- }
- }
- }
-
/* Check for a nilary operator */
if (op1 == NULL)
@@ -20755,27 +20736,6 @@ void Compiler::fgDebugCheckNodeLinks(BasicBlock* block, GenTree* node)
{
GenTreePtr lclVarTree;
expectedPrevTree = tree->gtOp.gtOp1;
- if ((tree->gtOper == GT_LDOBJ) &&
- (tree->gtLdObj.gtFldTreeList != NULL) &&
- impIsAddressInLocal(tree->gtOp.gtOp1, &lclVarTree))
- {
- GenTreePtr* fldTreeList = tree->gtLdObj.gtFldTreeList;
- GenTreePtr prev = NULL;
- unsigned fieldCount = lvaTable[lclVarTree->gtLclVarCommon.gtLclNum].lvFieldCnt;
-
- for (unsigned i = 0; i < fieldCount; i++)
- {
- if (fldTreeList[i] != NULL)
- {
- if (prev != NULL)
- {
- noway_assert(fldTreeList[i]->gtPrev == prev);
- }
- prev = fldTreeList[i];
- }
- }
- noway_assert(lclVarTree->gtPrev == prev);
- }
}
else if (tree->OperIsBinary() && tree->gtOp.gtOp1)
{
@@ -21531,7 +21491,7 @@ GenTreePtr Compiler::fgGetStructAsStructPtr(GenTreePtr tree)
noway_assert((tree->gtOper == GT_LCL_VAR) ||
(tree->gtOper == GT_FIELD) ||
(tree->gtOper == GT_IND) ||
- (tree->gtOper == GT_LDOBJ) ||
+ (tree->gtOper == GT_OBJ) ||
tree->OperIsSIMD() ||
// tree->gtOper == GT_CALL || cannot get address of call.
// tree->gtOper == GT_MKREFANY || inlining should've been aborted due to mkrefany opcode.
@@ -21540,7 +21500,7 @@ GenTreePtr Compiler::fgGetStructAsStructPtr(GenTreePtr tree)
switch (tree->OperGet())
{
- case GT_LDOBJ:
+ case GT_OBJ:
case GT_IND:
return tree->gtOp.gtOp1;
@@ -22440,29 +22400,19 @@ GenTreePtr Compiler::fgInlinePrependStatements(InlineInfo* inlineInfo)
{
/* Create the temp assignment for this argument */
- CORINFO_CLASS_HANDLE structType = DUMMY_INIT(0);
+ CORINFO_CLASS_HANDLE structHnd = DUMMY_INIT(0);
if (varTypeIsStruct(lclVarInfo[argNum].lclTypeInfo))
{
- if (inlArgInfo[argNum].argNode->gtOper == GT_LDOBJ)
- {
- structType = inlArgInfo[argNum].argNode->gtLdObj.gtClass;
- }
- else if (inlArgInfo[argNum].argNode->gtOper == GT_MKREFANY)
- {
- structType = lclVarInfo[argNum].lclVerTypeInfo.GetClassHandle();
- }
- else
- {
- noway_assert(!"Unknown struct type");
- }
+ structHnd = gtGetStructHandleIfPresent(inlArgInfo[argNum].argNode);
+ noway_assert(structHnd != NO_CLASS_HANDLE);
}
// Unsafe value cls check is not needed for argTmpNum here since in-linee compiler instance would have
// iterated over these and marked them accordingly.
impAssignTempGen(inlArgInfo[argNum].argTmpNum,
inlArgInfo[argNum].argNode,
- structType,
+ structHnd,
(unsigned)CHECK_SPILL_NONE,
& afterStmt,
callILOffset,
@@ -22505,10 +22455,10 @@ GenTreePtr Compiler::fgInlinePrependStatements(InlineInfo* inlineInfo)
{
noway_assert(inlArgInfo[argNum].argIsUsed == false);
- if (inlArgInfo[argNum].argNode->gtOper == GT_LDOBJ ||
+ if (inlArgInfo[argNum].argNode->gtOper == GT_OBJ ||
inlArgInfo[argNum].argNode->gtOper == GT_MKREFANY)
{
- // Don't put GT_LDOBJ node under a GT_COMMA.
+ // Don't put GT_OBJ node under a GT_COMMA.
// Codegen can't deal with it.
// Just hang the address here in case there are side-effect.
newStmt = gtNewStmt(gtUnusedValNode(inlArgInfo[argNum].argNode->gtOp.gtOp1), callILOffset);
diff --git a/src/jit/gentree.cpp b/src/jit/gentree.cpp
index f8a1eeb4b6..cad84177c4 100755
--- a/src/jit/gentree.cpp
+++ b/src/jit/gentree.cpp
@@ -259,7 +259,7 @@ void GenTree::InitNodeSize()
GenTree::s_gtNodeSizes[GT_ARR_INDEX ] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_ARR_OFFSET ] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_RET_EXPR ] = TREE_NODE_SZ_LARGE;
- GenTree::s_gtNodeSizes[GT_LDOBJ ] = TREE_NODE_SZ_LARGE;
+ GenTree::s_gtNodeSizes[GT_OBJ ] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_FIELD ] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_STMT ] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_CMPXCHG ] = TREE_NODE_SZ_LARGE;
@@ -331,7 +331,7 @@ void GenTree::InitNodeSize()
static_assert_no_msg(sizeof(GenTreeCpObj) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeRetExpr) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeStmt) <= TREE_NODE_SZ_LARGE); // *** large node
- static_assert_no_msg(sizeof(GenTreeLdObj) <= TREE_NODE_SZ_LARGE); // *** large node
+ static_assert_no_msg(sizeof(GenTreeObj) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeClsVar) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeArgPlace) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeLabel) <= TREE_NODE_SZ_SMALL);
@@ -1252,8 +1252,8 @@ AGAIN:
case GT_CAST:
if (op1->gtCast.gtCastType != op2->gtCast.gtCastType) return false;
break;
- case GT_LDOBJ:
- if (op1->gtLdObj.gtClass != op2->gtLdObj.gtClass) return false;
+ case GT_OBJ:
+ if (op1->AsObj()->gtClass != op2->AsObj()->gtClass) return false;
break;
// For the ones below no extra argument matters for comparison.
@@ -1759,8 +1759,8 @@ AGAIN:
case GT_CAST:
hash ^= tree->gtCast.gtCastType;
break;
- case GT_LDOBJ:
- hash ^= static_cast<unsigned>(reinterpret_cast<uintptr_t>(tree->gtLdObj.gtClass));
+ case GT_OBJ:
+ hash ^= static_cast<unsigned>(reinterpret_cast<uintptr_t>(tree->gtObj.gtClass));
break;
case GT_INDEX:
hash += tree->gtIndex.gtIndElemSize;
@@ -3113,7 +3113,7 @@ COMMON_CNS:
break;
case GT_MKREFANY:
- case GT_LDOBJ:
+ case GT_OBJ:
level = gtSetEvalOrder(tree->gtOp.gtOp1);
ftreg |= tree->gtOp.gtOp1->gtRsvdRegs;
costEx = tree->gtOp.gtOp1->gtCostEx + 1;
@@ -4545,10 +4545,10 @@ GenTreePtr* GenTree::gtGetChildPointer(GenTreePtr parent)
#if !FEATURE_MULTIREG_ARGS
// Note that when FEATURE_MULTIREG_ARGS==1
- // a GT_LDOBJ node is handled above by the default case
- case GT_LDOBJ:
- // Any GT_LDOBJ with a field must be lowered before this point.
- noway_assert(!"GT_LDOBJ encountered in GenTree::gtGetChildPointer");
+ // a GT_OBJ node is handled above by the default case
+ case GT_OBJ:
+ // Any GT_OBJ with a field must be lowered before this point.
+ noway_assert(!"GT_OBJ encountered in GenTree::gtGetChildPointer");
break;
#endif // !FEATURE_MULTIREG_ARGS
@@ -4717,10 +4717,7 @@ bool GenTree::OperMayThrow()
case GT_ARR_INDEX:
case GT_CATCH_ARG:
case GT_ARR_LENGTH:
- case GT_LDOBJ:
- case GT_INITBLK:
- case GT_COPYBLK:
- case GT_COPYOBJ:
+ case GT_OBJ:
case GT_LCLHEAP:
case GT_CKFINITE:
case GT_NULLCHECK:
@@ -5450,12 +5447,12 @@ GenTreePtr Compiler::gtNewAssignNode(GenTreePtr dst, GenTreePtr src DEB
return asg;
}
-// Creates a new LdObj node.
-GenTreeLdObj* Compiler::gtNewLdObjNode(CORINFO_CLASS_HANDLE structHnd, GenTree* addr)
+// Creates a new Obj node.
+GenTreeObj* Compiler::gtNewObjNode(CORINFO_CLASS_HANDLE structHnd, GenTree* addr)
{
var_types nodeType = impNormStructType(structHnd);
assert(varTypeIsStruct(nodeType));
- return new (this, GT_LDOBJ) GenTreeLdObj(nodeType, addr, structHnd);
+ return new (this, GT_OBJ) GenTreeObj(nodeType, addr, structHnd);
}
// Creates a new CpObj node.
@@ -6072,9 +6069,8 @@ GenTreePtr Compiler::gtCloneExpr(GenTree * tree,
VarSetOps::AssignAllowUninitRhs(this, copy->gtQmark.gtElseLiveSet, tree->gtQmark.gtElseLiveSet);
break;
- case GT_LDOBJ:
- copy = new (this, GT_LDOBJ) GenTreeLdObj(tree->TypeGet(), tree->gtOp.gtOp1, tree->gtLdObj.gtClass);
- // Apparently, it's OK not to copy the field list when cloning. Or at least no tests fail when we don't.
+ case GT_OBJ:
+ copy = new (this, GT_OBJ) GenTreeObj(tree->TypeGet(), tree->gtOp.gtOp1, tree->AsObj()->gtClass);
break;
case GT_BOX:
@@ -8480,7 +8476,7 @@ void Compiler::gtDispTree(GenTreePtr tree,
printf(" %s <- %s", varTypeName(toType), varTypeName(fromType));
}
- if (tree->gtOper == GT_LDOBJ && (tree->gtFlags & GTF_VAR_DEATH))
+ if (tree->gtOper == GT_OBJ && (tree->gtFlags & GTF_VAR_DEATH))
{
printf(" (last use)");
}
@@ -10988,19 +10984,22 @@ GenTreePtr Compiler::gtNewTempAssign(unsigned tmp, GenTreePtr val)
// With first-class structs, we should be propagating the class handle on all non-primitive
// struct types. But we don't have a convenient way to do that for all SIMD temps.
- if (varTypeIsStruct(valTyp) && (gtGetStructHandleIfPresent(val) != NO_CLASS_HANDLE))
+
+ CORINFO_CLASS_HANDLE structHnd = gtGetStructHandleIfPresent(val);
+ if (varTypeIsStruct(valTyp) && (structHnd != NO_CLASS_HANDLE))
{
- /* The GT_LDOBJ may be be a child of a GT_COMMA */
+ // The GT_OBJ may be be a child of a GT_COMMA.
GenTreePtr valx = val->gtEffectiveVal(/*commaOnly*/true);
- if (valx->gtOper == GT_LDOBJ)
+ if (valx->gtOper == GT_OBJ)
{
- lvaSetStruct(tmp, valx->gtLdObj.gtClass, false);
+ lvaSetStruct(tmp, structHnd, false);
}
dest->gtFlags |= GTF_DONT_CSE;
valx->gtFlags |= GTF_DONT_CSE;
- asg = impAssignStruct(dest, val,
- lvaGetStruct(tmp),
+ asg = impAssignStruct(dest,
+ val,
+ structHnd,
(unsigned)CHECK_SPILL_NONE);
}
else
@@ -11125,8 +11124,7 @@ GenTreePtr Compiler::gtNewRefCOMfield(GenTreePtr objPtr,
{
if (varTypeIsStruct(lclTyp))
{
- tree = new (this, GT_LDOBJ) GenTreeLdObj(lclTyp, tree, structType);
- tree->gtLdObj.gtFldTreeList = NULL;
+ tree = gtNewObjNode(structType, tree);
}
else
{
@@ -12880,7 +12878,8 @@ CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleIfPresent(GenTree* tree)
{
default:
break;
- case GT_LDOBJ: structHnd = tree->gtLdObj.gtClass; break;
+ case GT_MKREFANY: structHnd = impGetRefAnyClass(); break;
+ case GT_OBJ: structHnd = tree->gtObj.gtClass; break;
case GT_CALL: structHnd = tree->gtCall.gtRetClsHnd; break;
case GT_RET_EXPR: structHnd = tree->gtRetExpr.gtRetClsHnd; break;
case GT_ARGPLACE: structHnd = tree->gtArgPlace.gtArgPlaceClsHnd; break;
@@ -12892,7 +12891,9 @@ CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleIfPresent(GenTree* tree)
case GT_LCL_VAR:
case GT_LCL_FLD:
structHnd = lvaTable[tree->AsLclVarCommon()->gtLclNum].lvVerTypeInfo.GetClassHandle();
- assert(structHnd != NO_CLASS_HANDLE);
+ break;
+ case GT_RETURN:
+ structHnd = gtGetStructHandleIfPresent(tree->gtOp.gtOp1);
break;
case GT_IND:
if (tree->gtFlags & GTF_IND_ARR_INDEX)
@@ -12905,11 +12906,11 @@ CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleIfPresent(GenTree* tree)
#ifdef FEATURE_SIMD
else if (varTypeIsSIMD(tree))
{
- structHnd = getStructHandleForSIMDType(tree->gtType);
+ structHnd = gtGetStructHandleForSIMD(tree->gtType, TYP_FLOAT);
}
break;
case GT_SIMD:
- structHnd = getStructHandleForSIMDType(tree->gtType);
+ structHnd = gtGetStructHandleForSIMD(tree->gtType, tree->AsSIMD()->gtSIMDBaseType);
#endif // FEATURE_SIMD
break;
}
diff --git a/src/jit/gentree.h b/src/jit/gentree.h
index a1787f1d72..631f67b79f 100644
--- a/src/jit/gentree.h
+++ b/src/jit/gentree.h
@@ -1213,7 +1213,7 @@ public:
case GT_COPYBLK:
case GT_COPYOBJ:
case GT_INITBLK:
- case GT_LDOBJ:
+ case GT_OBJ:
case GT_BOX:
case GT_ARR_INDEX:
case GT_ARR_ELEM:
@@ -2869,6 +2869,27 @@ protected:
#endif // DEBUGGABLE_GENTREE
};
+// gtObj -- 'object' (GT_OBJ). */
+
+struct GenTreeObj: public GenTreeUnOp
+{
+ // The address of the block.
+ GenTreePtr& Addr() { return gtOp1; }
+
+ CORINFO_CLASS_HANDLE gtClass; // the class of the object
+
+ GenTreeObj(var_types type, GenTreePtr addr, CORINFO_CLASS_HANDLE cls) :
+ GenTreeUnOp(GT_OBJ, type, addr),
+ gtClass(cls)
+ {
+ gtFlags |= GTF_GLOB_REF; // An Obj is always a global reference.
+ }
+
+#if DEBUGGABLE_GENTREE
+ GenTreeObj() : GenTreeUnOp() {}
+#endif
+};
+
// Represents a CpObj MSIL Node.
struct GenTreeCpObj : public GenTreeBlkOp
{
@@ -3270,24 +3291,7 @@ struct GenTreeStmt: public GenTree
#endif
};
-/* gtLdObj -- 'push object' (GT_LDOBJ). */
-struct GenTreeLdObj: public GenTreeUnOp
-{
- CORINFO_CLASS_HANDLE gtClass; // object being loaded
- // TODO-Cleanup: Consider adding the GC layout information to this node
- GenTreePtr * gtFldTreeList; // The list of trees that represents the fields of this struct
-
- GenTreeLdObj(var_types type, GenTreePtr op, CORINFO_CLASS_HANDLE cls) :
- GenTreeUnOp(GT_LDOBJ, type, op),
- gtClass(cls), gtFldTreeList(NULL)
- {
- gtFlags |= GTF_GLOB_REF; // A LdObj is always a global reference.
- }
-#if DEBUGGABLE_GENTREE
- GenTreeLdObj() : GenTreeUnOp() {}
-#endif
-};
/* NOTE: Any tree nodes that are larger than 8 bytes (two ints or
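
The gentree.h hunk swaps GenTreeLdObj for GenTreeObj: the new node keeps only the address operand and the class handle, and drops the gtFldTreeList array of per-field trees (the legacy backend now rebuilds field nodes where it needs them, as the codegenlegacy.cpp hunk above shows). A pared-down standalone mirror of the new node shape, with stand-in types rather than the real GenTree hierarchy:

typedef void* CORINFO_CLASS_HANDLE;   // stand-in; the real handle type comes from the EE interface

struct GenTreeSketch
{
    unsigned gtFlags = 0;
};

struct GenTreeUnOpSketch : GenTreeSketch
{
    GenTreeSketch* gtOp1;
    explicit GenTreeUnOpSketch(GenTreeSketch* op1) : gtOp1(op1) {}
};

const unsigned GTF_GLOB_REF = 0x1;    // stand-in flag value

// GT_OBJ: a struct-typed load through an address that carries the class handle,
// so later phases can re-derive the struct's size and GC layout on demand.
struct GenTreeObjSketch : GenTreeUnOpSketch
{
    CORINFO_CLASS_HANDLE gtClass;
    GenTreeSketch*& Addr() { return gtOp1; }

    GenTreeObjSketch(GenTreeSketch* addr, CORINFO_CLASS_HANDLE cls)
        : GenTreeUnOpSketch(addr), gtClass(cls)
    {
        gtFlags |= GTF_GLOB_REF;      // an OBJ is always treated as a global reference
    }
};
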
diff --git a/src/jit/gschecks.cpp b/src/jit/gschecks.cpp
index 90187655f7..3a80aa4f40 100644
--- a/src/jit/gschecks.cpp
+++ b/src/jit/gschecks.cpp
@@ -125,7 +125,7 @@ Compiler::fgWalkResult Compiler::gsMarkPtrsAndAssignGroups(GenTreePtr *pTree, fg
fIsBlk = true;
// fallthrough
case GT_IND:
- case GT_LDOBJ:
+ case GT_OBJ:
case GT_ARR_ELEM:
case GT_ARR_INDEX:
case GT_ARR_OFFSET:
diff --git a/src/jit/gtlist.h b/src/jit/gtlist.h
index 365c8e322a..4eee84031b 100644
--- a/src/jit/gtlist.h
+++ b/src/jit/gtlist.h
@@ -73,7 +73,7 @@ GTNODE(IND , "indir" ,0,GTK_UNOP) // load indirect
GTNODE(STOREIND , "storeIndir" ,0,GTK_BINOP) // store indirection
// TODO-Cleanup: GT_ARR_BOUNDS_CHECK should be made a GTK_BINOP now that it has only two child nodes
GTNODE(ARR_BOUNDS_CHECK , "arrBndsChk" ,0,GTK_SPECIAL) // array bounds check
-GTNODE(LDOBJ , "ldobj" ,0,GTK_UNOP|GTK_EXOP)
+GTNODE(OBJ , "obj" ,0,GTK_UNOP|GTK_EXOP)
GTNODE(BOX , "box" ,0,GTK_UNOP|GTK_EXOP)
#ifdef FEATURE_SIMD
diff --git a/src/jit/gtstructs.h b/src/jit/gtstructs.h
index d9fefbedaa..3c55c30517 100644
--- a/src/jit/gtstructs.h
+++ b/src/jit/gtstructs.h
@@ -80,7 +80,7 @@ GTSTRUCT_1(ArrOffs , GT_ARR_OFFSET)
GTSTRUCT_1(ArrIndex , GT_ARR_INDEX)
GTSTRUCT_1(RetExpr , GT_RET_EXPR)
GTSTRUCT_1(Stmt , GT_STMT)
-GTSTRUCT_1(LdObj , GT_LDOBJ)
+GTSTRUCT_1(Obj , GT_OBJ)
GTSTRUCT_2(ClsVar , GT_CLS_VAR, GT_CLS_VAR_ADDR)
GTSTRUCT_1(ArgPlace , GT_ARGPLACE)
GTSTRUCT_1(Label , GT_LABEL)
diff --git a/src/jit/importer.cpp b/src/jit/importer.cpp
index 514c7f1083..0dbecf8ba8 100644
--- a/src/jit/importer.cpp
+++ b/src/jit/importer.cpp
@@ -588,6 +588,7 @@ inline
void Compiler::impAppendStmt(GenTreePtr stmt, unsigned chkLevel)
{
assert(stmt->gtOper == GT_STMT);
+ noway_assert(impTreeLast != nullptr);
/* If the statement being appended has any side-effects, check the stack
to see if anything needs to be spilled to preserve correct ordering. */
@@ -599,7 +600,8 @@ void Compiler::impAppendStmt(GenTreePtr stmt, unsigned chkLevel)
we handle them specially using impSpillLclRefs(). Temp locals should
be fine too. */
- if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
+ if ((expr->gtOper == GT_ASG) &&
+ (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
!(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) &&
!gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
{
@@ -872,7 +874,7 @@ GenTreeArgList* Compiler::impPopList(unsigned count,
if (varTypeIsStruct(temp))
{
- // Morph trees that aren't already LDOBJs or MKREFANY to be LDOBJs
+ // Morph trees that aren't already OBJs or MKREFANY to be OBJs
assert(ti.IsType(TI_STRUCT));
structType = ti.GetClassHandleForValueClass();
temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
@@ -1068,11 +1070,11 @@ GenTreePtr Compiler::impAssignStruct(GenTreePtr dest,
assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN ||
dest->gtOper == GT_FIELD || dest->gtOper == GT_IND ||
- dest->gtOper == GT_LDOBJ);
+ dest->gtOper == GT_OBJ);
GenTreePtr destAddr;
- if (dest->gtOper == GT_IND || dest->gtOper == GT_LDOBJ)
+ if (dest->gtOper == GT_IND || dest->gtOper == GT_OBJ)
{
destAddr = dest->gtOp.gtOp1;
}
@@ -1099,7 +1101,7 @@ GenTreePtr Compiler::impAssignStructPtr(GenTreePtr dest,
// TODO-ARM-BUG: Does ARM need this?
// TODO-ARM64-BUG: Does ARM64 need this?
assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD ||
- src->gtOper == GT_IND || src->gtOper == GT_LDOBJ ||
+ src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY ||
src->gtOper == GT_RET_EXPR || src->gtOper == GT_COMMA ||
src->gtOper == GT_ADDR ||
@@ -1108,7 +1110,7 @@ GenTreePtr Compiler::impAssignStructPtr(GenTreePtr dest,
assert(varTypeIsStruct(src));
assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD ||
- src->gtOper == GT_IND || src->gtOper == GT_LDOBJ ||
+ src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY ||
src->gtOper == GT_RET_EXPR || src->gtOper == GT_COMMA ||
(src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
@@ -1197,9 +1199,9 @@ GenTreePtr Compiler::impAssignStructPtr(GenTreePtr dest,
return gtNewAssignNode(dest, src);
}
}
- else if (src->gtOper == GT_LDOBJ)
+ else if (src->gtOper == GT_OBJ)
{
- assert(src->gtLdObj.gtClass == structHnd);
+ assert(src->gtObj.gtClass == structHnd);
src = src->gtOp.gtOp1;
}
else if (src->gtOper == GT_MKREFANY)
@@ -1280,14 +1282,14 @@ GenTreePtr Compiler::impGetStructAddr(GenTreePtr structVal,
genTreeOps oper = structVal->gtOper;
- if (oper == GT_LDOBJ && willDeref)
+ if (oper == GT_OBJ && willDeref)
{
- assert(structVal->gtLdObj.gtClass == structHnd);
- return(structVal->gtLdObj.gtOp1);
+ assert(structVal->gtObj.gtClass == structHnd);
+ return(structVal->gtObj.gtOp.gtOp1);
}
- else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_LDOBJ || oper == GT_MKREFANY)
+ else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY)
{
- unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/ldobj"));
+ unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
@@ -1408,8 +1410,8 @@ var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
}
//****************************************************************************
-// Given TYP_STRUCT value 'structVal', make sure it is 'canonical'
-// is must be either a LDOBJ or a MKREFANY node
+// Given TYP_STRUCT value 'structVal', make sure it is 'canonical', that is
+// it is either an OBJ or a MKREFANY node.
//
GenTreePtr Compiler::impNormStructVal(GenTreePtr structVal,
CORINFO_CLASS_HANDLE structHnd,
@@ -1447,10 +1449,6 @@ GenTreePtr Compiler::impNormStructVal(GenTreePtr structVal,
structVal->gtType = structType;
break;
- case GT_IND:
- structVal->gtType = structType;
- break;
-
case GT_INDEX:
structVal->gtIndex.gtStructElemClass = structHnd;
structVal->gtIndex.gtIndElemSize = info.compCompHnd->getClassSize(structHnd);
@@ -1465,21 +1463,24 @@ GenTreePtr Compiler::impNormStructVal(GenTreePtr structVal,
case GT_LCL_FLD:
break;
- case GT_LDOBJ:
+ case GT_OBJ:
+ case GT_IND:
// These should already have the appropriate type.
assert(structVal->gtType == structType);
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
- // These don't preserve the handle.
- assert(varTypeIsSIMD(structVal));
+ assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
break;
#endif // FEATURE_SIMD
case GT_COMMA:
{
+ // The second thing is the struct node.
GenTree* op2 = structVal->gtOp.gtOp2;
+ assert(op2->gtType == structType);
+
impNormStructVal(op2, structHnd, curLevel, forceNormalization);
structType = op2->TypeGet();
structVal->gtType = structType;
@@ -1492,13 +1493,13 @@ GenTreePtr Compiler::impNormStructVal(GenTreePtr structVal,
}
// Is it already normalized?
- if (!forceNormalization && (structVal->gtOper == GT_MKREFANY || structVal->gtOper == GT_LDOBJ))
+ if (!forceNormalization && (structVal->gtOper == GT_MKREFANY || structVal->gtOper == GT_OBJ))
return(structVal);
- // Normalize it by wraping it in a LDOBJ
+ // Normalize it by wraping it in an OBJ
GenTreePtr structAddr = impGetStructAddr(structVal, structHnd, curLevel, !forceNormalization); // get the addr of struct
- GenTreePtr structLdobj = new (this, GT_LDOBJ) GenTreeLdObj(structType, structAddr, structHnd);
+ GenTreePtr structObj = new (this, GT_OBJ) GenTreeObj(structType, structAddr, structHnd);
if (structAddr->gtOper == GT_ADDR)
{
@@ -1509,18 +1510,18 @@ GenTreePtr Compiler::impNormStructVal(GenTreePtr structVal,
}
if (structVal->IsLocal())
{
- // A LDOBJ on a ADDR(LCL_VAR) can never raise an exception
+ // A OBJ on a ADDR(LCL_VAR) can never raise an exception
// so we don't set GTF_EXCEPT here.
//
- // TODO-CQ: Clear the GTF_GLOB_REF flag on structLdobj as well
+ // TODO-CQ: Clear the GTF_GLOB_REF flag on structObj as well
// but this needs additional work when inlining.
}
else
{
- // In general a LDOBJ is an IND and could raise an exception
- structLdobj->gtFlags |= GTF_EXCEPT;
+ // In general a OBJ is an IND and could raise an exception
+ structObj->gtFlags |= GTF_EXCEPT;
}
- return(structLdobj);
+ return(structObj);
}
@@ -1848,7 +1849,7 @@ GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RUNTIME_LOOKUP_KIND
* Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
* If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum,
* else, grab a new temp.
- * For structs (which can be pushed on the stack using ldobj, etc),
+ * For structs (which can be pushed on the stack using obj, etc),
* special handling is needed
*/
@@ -2847,8 +2848,8 @@ GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO * sig)
//
// At this point we are ready to commit to implementing the InitializeArray
- // instrinsic using a GT_COPYBLK node. Pop the arguments from the stack and
- // return the GT_COPYBLK node.
+ // instrinsic using a struct assignment. Pop the arguments from the stack and
+ // return the struct assignment node.
//
impPopStack();
@@ -4706,7 +4707,7 @@ GenTreePtr Compiler::impTransformThis (GenTreePtr thisPtr,
GenTreePtr obj = thisPtr;
assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
- obj = gtNewLdObjNode(pConstrainedResolvedToken->hClass, obj);
+ obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
obj->gtFlags |= GTF_EXCEPT;
CorInfoType jitTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
@@ -4714,7 +4715,7 @@ GenTreePtr Compiler::impTransformThis (GenTreePtr thisPtr,
{
obj->ChangeOperUnchecked(GT_IND);
- // ldobj could point anywhere, example a boxed class static int
+ // Obj could point anywhere, example a boxed class static int
obj->gtFlags |= GTF_IND_TGTANYWHERE;
obj->gtType = JITtype2varType(jitTyp);
@@ -5212,7 +5213,7 @@ GenTreePtr Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN * pResolv
if (varTypeIsStruct(lclTyp))
{
// Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT.
- op1 = gtNewLdObjNode(pFieldInfo->structType, op1);
+ op1 = gtNewObjNode(pFieldInfo->structType, op1);
}
else
{
@@ -7174,12 +7175,12 @@ REDO_RETURN_NODE:
{
op->ChangeOper(GT_LCL_FLD);
}
- else if (op->gtOper == GT_LDOBJ)
+ else if (op->gtOper == GT_OBJ)
{
- GenTreePtr op1 = op->gtLdObj.gtOp1;
+ GenTreePtr op1 = op->AsObj()->Addr();
- // We will fold away LDOBJ/ADDR
- // except for LDOBJ/ADDR/INDEX
+ // We will fold away OBJ/ADDR
+ // except for OBJ/ADDR/INDEX
// as the array type influences the array element's offset
// Later in this method we change op->gtType to info.compRetNativeType
// This is not correct when op is a GT_INDEX as the starting offset
@@ -7193,9 +7194,7 @@ REDO_RETURN_NODE:
op = op1->gtOp.gtOp1;
goto REDO_RETURN_NODE;
}
-
- op->gtLdObj.gtClass = nullptr;
- op->gtLdObj.gtFldTreeList = nullptr;
+ op->gtObj.gtClass = NO_CLASS_HANDLE;
op->ChangeOperUnchecked(GT_IND);
op->gtFlags |= GTF_IND_TGTANYWHERE;
}
@@ -7219,7 +7218,7 @@ REDO_RETURN_NODE:
// No need to spill anything as we're about to return.
impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
- // Don't both creating a GT_ADDR & GT_LDOBJ jsut to undo all of that
+ // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
// jump directly to a GT_LCL_FLD.
op = gtNewLclvNode(tmpNum, info.compRetNativeType);
op->ChangeOper(GT_LCL_FLD);
@@ -7243,7 +7242,7 @@ REDO_RETURN_NODE:
#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
#endif // DEBUG
- // Don't change the gtType node just yet, it will get changed later
+ // Don't change the gtType of the node just yet, it will get changed later.
return op;
}
}
@@ -8792,6 +8791,7 @@ DECODE_OPCODE:
GenTreePtr op3;
genTreeOps oper;
+ unsigned size;
int val;
@@ -9674,14 +9674,23 @@ ARR_LD_POST_VERIFY:
// remember the element size
if (lclTyp == TYP_REF)
+ {
op1->gtIndex.gtIndElemSize = sizeof(void*);
+ }
else
{
// If ldElemClass is precisely a primitive type, use that, otherwise, preserve the struct type.
if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
+ {
op1->gtIndex.gtStructElemClass = ldelemClsHnd;
+ }
assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
- op1->gtIndex.gtIndElemSize = info.compCompHnd->getClassSize(ldelemClsHnd);
+ if (lclTyp == TYP_STRUCT)
+ {
+ size = info.compCompHnd->getClassSize(ldelemClsHnd);
+ op1->gtIndex.gtIndElemSize = size;
+ op1->gtType = lclTyp;
+ }
}
if ((opcode == CEE_LDELEMA) || ldstruct)
@@ -9699,8 +9708,8 @@ ARR_LD_POST_VERIFY:
if (ldstruct)
{
- // Do a LDOBJ on the result
- op1 = gtNewLdObjNode(ldelemClsHnd, op1);
+ // Create an OBJ for the result
+ op1 = gtNewObjNode(ldelemClsHnd, op1);
op1->gtFlags |= GTF_EXCEPT;
}
impPushOnStack(op1, tiRetVal);
@@ -10708,7 +10717,7 @@ _CONV:
if (varTypeIsStruct(op1))
{
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
- // Non-calls, such as ldobj or ret_expr, have to go through this.
+ // Non-calls, such as obj or ret_expr, have to go through this.
// Calls with large struct return value have to go through this.
// Helper calls with small struct return value also have to go
// through this since they do not follow Unix calling convention.
@@ -11779,7 +11788,9 @@ DO_LDFTN:
/* Preserve 'small' int types */
if (lclTyp > TYP_INT)
+ {
lclTyp = genActualType(lclTyp);
+ }
bool usesHelper = false;
@@ -11945,7 +11956,7 @@ DO_LDFTN:
if (!usesHelper)
{
- assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) || (op1->OperGet() == GT_LDOBJ));
+ assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) || (op1->OperGet() == GT_OBJ));
op1->gtFlags |= GTF_IND_VOLATILE;
}
}
@@ -11954,7 +11965,7 @@ DO_LDFTN:
{
if (!usesHelper)
{
- assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) || (op1->OperGet() == GT_LDOBJ));
+ assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) || (op1->OperGet() == GT_OBJ));
op1->gtFlags |= GTF_IND_UNALIGNED;
}
}
@@ -12072,8 +12083,10 @@ FIELD_DONE:
}
/* Preserve 'small' int types */
- if (lclTyp > TYP_INT)
+ if (lclTyp > TYP_INT)
+ {
lclTyp = genActualType(lclTyp);
+ }
switch (fieldInfo.fieldAccessor)
{
@@ -12462,7 +12475,7 @@ FIELD_DONE:
// make certain it is normalized;
op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
- if (op1->gtOper == GT_LDOBJ)
+ if (op1->gtOper == GT_OBJ)
{
// Get the address of the refany
op1 = op1->gtOp.gtOp1;
@@ -12576,8 +12589,8 @@ FIELD_DONE:
/* Pop the object and create the unbox helper call */
/* You might think that for UNBOX_ANY we need to push a different */
/* (non-byref) type, but here we're making the tiRetVal that is used */
- /* for the intermediate pointer which we then transfer onto the LDOBJ */
- /* instruction. LDOBJ then creates the appropriate tiRetVal. */
+ /* for the intermediate pointer which we then transfer onto the OBJ */
+ /* instruction. OBJ then creates the appropriate tiRetVal. */
if (tiVerificationNeeded)
{
typeInfo tiUnbox = impStackTop().seTypeInfo;
@@ -12679,7 +12692,7 @@ FIELD_DONE:
| UNBOX | push the BYREF | spill the STRUCT to a local, |
| | | push the BYREF to this local |
|---------------------------------------------------------------------
- | UNBOX_ANY | push a GT_LDOBJ of | push the STRUCT |
+ | UNBOX_ANY | push a GT_OBJ of | push the STRUCT |
| | the BYREF | For Linux when the |
| | | struct is returned in two |
| | | registers create a temp |
@@ -12720,8 +12733,8 @@ FIELD_DONE:
{
// Normal unbox helper returns a TYP_BYREF.
impPushOnStack(op1, tiRetVal);
- oper = GT_LDOBJ;
- goto LDOBJ;
+ oper = GT_OBJ;
+ goto OBJ;
}
assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
@@ -12757,12 +12770,12 @@ FIELD_DONE:
impPushOnStack(op1, tiRetVal);
// Load the struct.
- oper = GT_LDOBJ;
+ oper = GT_OBJ;
assert(op1->gtType == TYP_BYREF);
assert(!tiVerificationNeeded || tiRetVal.IsByRef());
- goto LDOBJ;
+ goto OBJ;
}
else
{
@@ -13230,14 +13243,14 @@ INITBLK_OR_INITOBJ:
case CEE_LDOBJ: {
- oper = GT_LDOBJ;
+ oper = GT_OBJ;
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
-LDOBJ:
+OBJ:
tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
@@ -13292,9 +13305,9 @@ LDOBJ:
}
else
{
- // LDOBJ returns a struct
+ // OBJ returns a struct
// and an inline argument which is the class token of the loaded obj
- op1 = gtNewLdObjNode(resolvedToken.hClass, op1);
+ op1 = gtNewObjNode(resolvedToken.hClass, op1);
}
op1->gtFlags |= GTF_EXCEPT;
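
Among the importer.cpp changes, impNormStructVal still canonicalizes a struct value to either an OBJ or a MKREFANY node, wrapping anything else in OBJ(ADDR(value)) and marking it as possibly throwing unless the address is of a local. A rough standalone sketch of that shape, with a simplified node model and a stand-in throw flag:

#include <string>

// Minimal stand-ins for the tree shapes involved (not the JIT's real types).
struct Node
{
    std::string oper;   // "OBJ", "MKREFANY", "ADDR", "LCL_VAR", ...
    Node*       op1 = nullptr;
    bool        mayThrow = false;
};

// A struct value is "canonical" when it is already an OBJ or MKREFANY node.
// Otherwise it gets wrapped: take its address and put an OBJ on top. An OBJ
// over ADDR(LCL_VAR) cannot fault, so only the general case is marked throwing.
Node* normStructVal(Node* structVal)
{
    if (structVal->oper == "OBJ" || structVal->oper == "MKREFANY")
        return structVal;

    Node* addr = new Node{"ADDR", structVal};
    Node* obj  = new Node{"OBJ", addr};
    bool addrOfLocal = (structVal->oper == "LCL_VAR");
    obj->mayThrow = !addrOfLocal;
    return obj;
}
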
diff --git a/src/jit/liveness.cpp b/src/jit/liveness.cpp
index 43eab82707..07977af79d 100644
--- a/src/jit/liveness.cpp
+++ b/src/jit/liveness.cpp
@@ -1859,13 +1859,13 @@ SKIP_QMARK:
#ifdef LEGACY_BACKEND
// Generally, the last use information is associated with the lclVar node.
// However, for LEGACY_BACKEND, the information must be associated
- // with the LDOBJ itself for promoted structs.
+ // with the OBJ itself for promoted structs.
// In that case, the LDOBJ may be require an implementation that might itself allocate registers,
// so the variable(s) should stay live until the end of the LDOBJ.
// Note that for promoted structs lvTracked is false.
GenTreePtr lclVarTree = nullptr;
- if (tree->gtOper == GT_LDOBJ)
+ if (tree->gtOper == GT_OBJ)
{
// fgIsIndirOfAddrOfLocal returns nullptr if the tree is
// not an indir(addr(local)), in which case we will set lclVarTree
@@ -2208,7 +2208,7 @@ bool Compiler::fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_TP l
}
else if (asgNode == nullptr)
{
- // This may be followed by GT_IND/assign, GT_STOREIND or GT_LIST/block-op.
+ // This may be followed by GT_IND/assign or GT_STOREIND.
if (nextNode == nullptr)
{
return false;
diff --git a/src/jit/lower.cpp b/src/jit/lower.cpp
index 2d7b61f913..6e228a2f3f 100644
--- a/src/jit/lower.cpp
+++ b/src/jit/lower.cpp
@@ -1311,16 +1311,16 @@ GenTreePtr Lowering::NewPutArg(GenTreeCall* call, GenTreePtr arg, fgArgTabEntryP
{
unsigned numRefs = 0;
BYTE* gcLayout = new (comp, CMK_Codegen) BYTE[fp->numSlots];
- // We use GT_LDOBJ for non-SIMD struct arguments. However, for
- // SIMD arguments the GT_LDOBJ has already been transformed.
- if (arg->gtOper != GT_LDOBJ)
+ // We use GT_OBJ for non-SIMD struct arguments. However, for
+ // SIMD arguments the GT_OBJ has already been transformed.
+ if (arg->gtOper != GT_OBJ)
{
assert(varTypeIsSIMD(arg));
}
else
{
assert(!varTypeIsSIMD(arg));
- numRefs = comp->info.compCompHnd->getClassGClayout(arg->gtLdObj.gtClass, gcLayout);
+ numRefs = comp->info.compCompHnd->getClassGClayout(arg->gtObj.gtClass, gcLayout);
}
putArg->AsPutArgStk()->setGcPointers(numRefs, gcLayout);
diff --git a/src/jit/lowerarm64.cpp b/src/jit/lowerarm64.cpp
index c51f7b052e..71bfc23632 100644
--- a/src/jit/lowerarm64.cpp
+++ b/src/jit/lowerarm64.cpp
@@ -623,10 +623,10 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
{
originalSize = 2 * TARGET_POINTER_SIZE;
}
- else if (actualArgNode->gtOper == GT_LDOBJ)
+ else if (actualArgNode->gtOper == GT_OBJ)
{
- CORINFO_CLASS_HANDLE ldObjClass = actualArgNode->gtLdObj.gtClass;
- originalSize = compiler->info.compCompHnd->getClassSize(ldObjClass);
+ CORINFO_CLASS_HANDLE objClass = actualArgNode->gtObj.gtClass;
+ originalSize = compiler->info.compCompHnd->getClassSize(objClass);
}
else
{
diff --git a/src/jit/lowerxarch.cpp b/src/jit/lowerxarch.cpp
index c9ea7d18b1..4103b843b0 100644
--- a/src/jit/lowerxarch.cpp
+++ b/src/jit/lowerxarch.cpp
@@ -948,12 +948,12 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
// If the node is TYP_STRUCT and it is put on stack with
// putarg_stk operation, we consume and produce no registers.
- // In this case the embedded LdObj node should not produce
+ // In this case the embedded Obj node should not produce
// registers too since it is contained.
// Note that if it is a SIMD type the argument will be in a register.
if (argNode->TypeGet() == TYP_STRUCT)
{
- assert(argNode->gtOp.gtOp1 != nullptr && argNode->gtOp.gtOp1->OperGet() == GT_LDOBJ);
+ assert(argNode->gtOp.gtOp1 != nullptr && argNode->gtOp.gtOp1->OperGet() == GT_OBJ);
argNode->gtOp.gtOp1->gtLsraInfo.dstCount = 0;
argNode->gtLsraInfo.srcCount = 0;
}
@@ -1001,9 +1001,9 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
{
originalSize = 2 * TARGET_POINTER_SIZE;
}
- else if (argNode->gtOper == GT_LDOBJ)
+ else if (argNode->gtOper == GT_OBJ)
{
- noway_assert(!"GT_LDOBJ not supported for amd64");
+ noway_assert(!"GT_OBJ not supported for amd64");
}
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
else if (argNode->gtOper == GT_PUTARG_REG)
@@ -1200,8 +1200,8 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
}
break;
#ifdef _TARGET_X86_
- case GT_LDOBJ:
- NYI_X86("GT_LDOBJ");
+ case GT_OBJ:
+ NYI_X86("GT_OBJ");
#endif //_TARGET_X86_
case GT_INITBLK:
@@ -1226,7 +1226,7 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
GenTreePtr src = tree->gtOp.gtOp1;
GenTreePtr srcAddr = nullptr;
- if ((src->OperGet() == GT_LDOBJ) || (src->OperGet() == GT_IND))
+ if ((src->OperGet() == GT_OBJ) || (src->OperGet() == GT_IND))
{
srcAddr = src->gtOp.gtOp1;
}
@@ -1317,7 +1317,7 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
putArgStkTree->gtPutArgStkKind = GenTreePutArgStk::PutArgStkKindRepInstr;
}
- // Always mark the LDOBJ and ADDR as contained trees by the putarg_stk. The codegen will deal with this tree.
+ // Always mark the OBJ and ADDR as contained trees by the putarg_stk. The codegen will deal with this tree.
MakeSrcContained(putArgStkTree, src);
// Balance up the inc above.
diff --git a/src/jit/morph.cpp b/src/jit/morph.cpp
index e163c7e860..93f3aca7da 100644
--- a/src/jit/morph.cpp
+++ b/src/jit/morph.cpp
@@ -2058,13 +2058,13 @@ GenTreePtr Compiler::fgMakeTmpArgNode(unsigned tmpVarNum
{
// ToDo-ARM64: Consider using: arg->ChangeOper(GT_LCL_FLD);
// as that is how FEATURE_UNIX_AMD64_STRUCT_PASSING works.
- // Create a GT_LDOBJ for the argument
+ // Create a GT_OBJ for the argument
// This will be passed by value in two registers
arg = gtNewOperNode(GT_ADDR, TYP_BYREF, arg);
addrNode = arg;
- // Ldobj the temp to use it as a call argument
- arg = gtNewLdObjNode(lvaGetStruct(tmpVarNum), arg);
+ // Create an Obj of the temp to use it as a call argument.
+ arg = gtNewObjNode(lvaGetStruct(tmpVarNum), arg);
}
else
#endif // _TARGET_ARM64_
@@ -2089,8 +2089,8 @@ GenTreePtr Compiler::fgMakeTmpArgNode(unsigned tmpVarNum
arg = gtNewOperNode(GT_ADDR, TYP_BYREF, arg);
addrNode = arg;
- // Ldobj the temp to use it as a call argument
- arg = gtNewLdObjNode(lvaGetStruct(tmpVarNum), arg);
+    // Create an Obj node for the temp to use it as a call argument
+ arg = gtNewObjNode(lvaGetStruct(tmpVarNum), arg);
arg->gtFlags |= GTF_EXCEPT;
#endif // not (_TARGET_AMD64_ or _TARGET_ARM64_)
@@ -2303,25 +2303,25 @@ void fgArgInfo::EvalArgsToTemps()
// Need a temp to walk any GT_COMMA nodes when searching for the clsHnd
GenTreePtr defArgTmp = defArg;
- /* The GT_LDOBJ may be be a child of a GT_COMMA */
+        // The GT_OBJ may be a child of a GT_COMMA.
while (defArgTmp->gtOper == GT_COMMA)
{
defArgTmp = defArgTmp->gtOp.gtOp2;
}
assert(varTypeIsStruct(defArgTmp));
- /* We handle two opcodes: GT_MKREFANY and GT_LDOBJ */
+ // We handle two opcodes: GT_MKREFANY and GT_OBJ.
if (defArgTmp->gtOper == GT_MKREFANY)
{
clsHnd = compiler->impGetRefAnyClass();
}
- else if (defArgTmp->gtOper == GT_LDOBJ)
+ else if (defArgTmp->gtOper == GT_OBJ)
{
- clsHnd = defArgTmp->gtLdObj.gtClass;
+ clsHnd = defArgTmp->AsObj()->gtClass;
}
else
{
- BADCODE("Unhandled TYP_STRUCT argument tree in fgMorphArgs");
+ BADCODE("Unhandled struct argument tree in fgMorphArgs");
}
}
@@ -3153,7 +3153,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
#endif
else // struct type
{
- /* We handle two opcodes: GT_MKREFANY and GT_LDOBJ */
+ // We handle two opcodes: GT_MKREFANY and GT_OBJ
if (argx->gtOper == GT_MKREFANY)
{
if (varTypeIsStruct(argx))
@@ -3178,30 +3178,31 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
size = 2;
#endif
}
- else // (argx->gtOper == GT_LDOBJ)
+ else
{
- GenTreePtr argLdobj = argx;
- GenTreePtr* parentOfArgLdobj = parentArgx;
+ // GT_OBJ case
+ GenTreePtr argObj = argx;
+ GenTreePtr* parentOfArgObj = parentArgx;
assert(args->IsList());
assert(argx == args->Current());
- /* The GT_LDOBJ may be be a child of a GT_COMMA */
- while (argLdobj->gtOper == GT_COMMA)
+                    /* The GT_OBJ may be a child of a GT_COMMA */
+ while (argObj->gtOper == GT_COMMA)
{
- parentOfArgLdobj = &argLdobj->gtOp.gtOp2;
- argLdobj = argLdobj->gtOp.gtOp2;
+ parentOfArgObj = &argObj->gtOp.gtOp2;
+ argObj = argObj->gtOp.gtOp2;
}
- if (argLdobj->gtOper != GT_LDOBJ)
+ if (argObj->gtOper != GT_OBJ)
BADCODE("illegal argument tree in fgMorphArgs");
- CORINFO_CLASS_HANDLE ldObjClass = argLdobj->gtLdObj.gtClass;
+ CORINFO_CLASS_HANDLE objClass = argObj->gtObj.gtClass;
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
- eeGetSystemVAmd64PassStructInRegisterDescriptor(ldObjClass, &structDesc);
+ eeGetSystemVAmd64PassStructInRegisterDescriptor(objClass, &structDesc);
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
- unsigned originalSize = info.compCompHnd->getClassSize(ldObjClass);
+ unsigned originalSize = info.compCompHnd->getClassSize(objClass);
originalSize = (originalSize == 0 ? TARGET_POINTER_SIZE : originalSize);
unsigned roundupSize = (unsigned)roundUp(originalSize, TARGET_POINTER_SIZE);
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
@@ -3226,7 +3227,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
#ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING
size = 1; // This must be copied to a temp and passed by address
passStructByRef = true;
- copyBlkClass = ldObjClass;
+ copyBlkClass = objClass;
#else // FEATURE_UNIX_AMD64_STRUCT_PASSING
if (!structDesc.passedInRegisters)
{
@@ -3235,9 +3236,9 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
}
else
{
- // The ldObjClass is used to materialize the struct on stack.
+ // The objClass is used to materialize the struct on stack.
passStructInRegisters = true;
- copyBlkClass = ldObjClass;
+ copyBlkClass = objClass;
}
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
#elif defined(_TARGET_ARM64_)
@@ -3245,20 +3246,20 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
{
size = 1; // This must be copied to a temp and passed by address
passStructByRef = true;
- copyBlkClass = ldObjClass;
+ copyBlkClass = objClass;
}
#endif
#ifdef _TARGET_ARM_
// If we're passing a promoted struct local var,
// we may need to skip some registers due to alignment; record those.
- GenTreePtr lclVar = fgIsIndirOfAddrOfLocal(argLdobj);
+ GenTreePtr lclVar = fgIsIndirOfAddrOfLocal(argObj);
if (lclVar != NULL)
{
LclVarDsc* varDsc = &lvaTable[lclVar->gtLclVarCommon.gtLclNum];
if (varDsc->lvPromoted)
{
- assert(argLdobj->OperGet() == GT_LDOBJ);
+ assert(argObj->OperGet() == GT_OBJ);
if (lvaGetPromotionType(varDsc) == PROMOTION_TYPE_INDEPENDENT)
{
fgAddSkippedRegsInPromotedStructArg(varDsc, intArgRegNum, &argSkippedRegMask);
@@ -3270,35 +3271,35 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
#ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING
else
{
- // change our GT_LDOBJ into a GT_IND of the correct type.
- structBaseType = argOrReturnTypeForStruct(originalSize, argLdobj->gtLdObj.gtClass, false /* forReturn */);
+ // change our GT_OBJ into a GT_IND of the correct type.
+ structBaseType = argOrReturnTypeForStruct(originalSize, objClass, false /* forReturn */);
// We've already ensured above that size is a power of 2, and less than pointer size.
noway_assert(structBaseType != TYP_UNKNOWN);
- argLdobj->ChangeOper(GT_IND);
+ argObj->ChangeOper(GT_IND);
// Now see if we can fold *(&X) into X
- if (argLdobj->gtOp.gtOp1->gtOper == GT_ADDR)
+ if (argObj->gtOp.gtOp1->gtOper == GT_ADDR)
{
- GenTreePtr temp = argLdobj->gtOp.gtOp1->gtOp.gtOp1;
+ GenTreePtr temp = argObj->gtOp.gtOp1->gtOp.gtOp1;
// Keep the DONT_CSE flag in sync
// (as the addr always marks it for its op1)
temp->gtFlags &= ~GTF_DONT_CSE;
- temp->gtFlags |= (argLdobj->gtFlags & GTF_DONT_CSE);
- DEBUG_DESTROY_NODE(argLdobj->gtOp.gtOp1); // GT_ADDR
- DEBUG_DESTROY_NODE(argLdobj); // GT_IND
+ temp->gtFlags |= (argObj->gtFlags & GTF_DONT_CSE);
+ DEBUG_DESTROY_NODE(argObj->gtOp.gtOp1); // GT_ADDR
+ DEBUG_DESTROY_NODE(argObj); // GT_IND
- argLdobj = temp;
- *parentOfArgLdobj = temp;
+ argObj = temp;
+ *parentOfArgObj = temp;
- // If the LDOBJ had been the top level node, we've now changed argx.
- if (parentOfArgLdobj == parentArgx)
+ // If the OBJ had been the top level node, we've now changed argx.
+ if (parentOfArgObj == parentArgx)
argx = temp;
}
- if (argLdobj->gtOper == GT_LCL_VAR)
+ if (argObj->gtOper == GT_LCL_VAR)
{
- unsigned lclNum = argLdobj->gtLclVarCommon.gtLclNum;
+ unsigned lclNum = argObj->gtLclVarCommon.gtLclNum;
LclVarDsc * varDsc = &lvaTable[lclNum];
if (varDsc->lvPromoted)
@@ -3310,36 +3311,36 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
if (genTypeSize(fieldVarDsc->TypeGet()) >= originalSize)
{
// we will use the first and only promoted field
- argLdobj->gtLclVarCommon.SetLclNum(varDsc->lvFieldLclStart);
+ argObj->gtLclVarCommon.SetLclNum(varDsc->lvFieldLclStart);
if (varTypeCanReg(fieldVarDsc->TypeGet()) && (genTypeSize(fieldVarDsc->TypeGet()) == originalSize))
{
// Just use the existing field's type
- argLdobj->gtType = fieldVarDsc->TypeGet();
+ argObj->gtType = fieldVarDsc->TypeGet();
}
else
{
// Can't use the existing field's type, so use GT_LCL_FLD to swizzle
// to a new type
- argLdobj->ChangeOper(GT_LCL_FLD);
- argLdobj->gtType = structBaseType;
+ argObj->ChangeOper(GT_LCL_FLD);
+ argObj->gtType = structBaseType;
}
- assert(varTypeCanReg(argLdobj->TypeGet()));
+ assert(varTypeCanReg(argObj->TypeGet()));
assert(copyBlkClass == NO_CLASS_HANDLE);
}
else
{
// use GT_LCL_FLD to swizzle the single field struct to a new type
lvaSetVarDoNotEnregister(lclNum DEBUG_ARG(DNER_LocalField));
- argLdobj->ChangeOper(GT_LCL_FLD);
- argLdobj->gtType = structBaseType;
+ argObj->ChangeOper(GT_LCL_FLD);
+ argObj->gtType = structBaseType;
}
}
else
{
// The struct fits into a single register, but it has been promoted into its
// constituent fields, and so we have to re-assemble it
- copyBlkClass = ldObjClass;
+ copyBlkClass = objClass;
#ifdef _TARGET_ARM_
// Alignment constraints may cause us not to use (to "skip") some argument registers.
// Add those, if any, to the skipped (int) arg reg mask.
@@ -3350,16 +3351,16 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
else if (!varTypeIsIntegralOrI(varDsc->TypeGet()))
{
// Not a promoted struct, so just swizzle the type by using GT_LCL_FLD
- argLdobj->ChangeOper(GT_LCL_FLD);
- argLdobj->gtType = structBaseType;
+ argObj->ChangeOper(GT_LCL_FLD);
+ argObj->gtType = structBaseType;
}
}
else
{
// Not a GT_LCL_VAR, so we can just change the type on the node
- argLdobj->gtType = structBaseType;
+ argObj->gtType = structBaseType;
}
- assert(varTypeCanReg(argLdobj->TypeGet()) ||
+ assert(varTypeCanReg(argObj->TypeGet()) ||
((copyBlkClass != NO_CLASS_HANDLE) && varTypeIsIntegral(structBaseType)));
size = 1;
@@ -3367,7 +3368,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
#endif // not _TARGET_X86_
- // We still have a struct unless we converted the GT_LDOBJ into a GT_IND above...
+ // We still have a struct unless we converted the GT_OBJ into a GT_IND above...
if ((structBaseType == TYP_STRUCT) &&
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
!passStructInRegisters
@@ -3377,30 +3378,30 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
)
{
// if the valuetype size is not a multiple of sizeof(void*),
- // we must copyblk to a temp before doing the ldobj to avoid
- // the ldobj reading memory past the end of the valuetype
+ // we must copyblk to a temp before doing the obj to avoid
+ // the obj reading memory past the end of the valuetype
#if defined(_TARGET_X86_) && !defined(LEGACY_BACKEND)
// TODO-X86-CQ: [1091733] Revisit for small structs, we should use push instruction
- copyBlkClass = ldObjClass;
+ copyBlkClass = objClass;
size = roundupSize / TARGET_POINTER_SIZE; // Normalize size to number of pointer sized items
#else // !defined(_TARGET_X86_) || defined(LEGACY_BACKEND)
if (roundupSize > originalSize)
{
- copyBlkClass = ldObjClass;
+ copyBlkClass = objClass;
// There are a few special cases where we can omit using a CopyBlk
// where we normally would need to use one.
- GenTreePtr ldObjOp1 = argLdobj->gtLdObj.gtOp1;
- if (ldObjOp1->gtOper == GT_ADDR)
+ GenTreePtr objAddr = argObj->gtObj.gtOp1;
+ if (objAddr->gtOper == GT_ADDR)
{
// exception : no need to use CopyBlk if the valuetype is on the stack
- if (ldObjOp1->gtFlags & GTF_ADDR_ONSTACK)
+ if (objAddr->gtFlags & GTF_ADDR_ONSTACK)
{
copyBlkClass = NO_CLASS_HANDLE;
}
// exception : no need to use CopyBlk if the valuetype is already a struct local
- else if (ldObjOp1->gtOp.gtOp1->gtOper == GT_LCL_VAR)
+ else if (objAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR)
{
copyBlkClass = NO_CLASS_HANDLE;
}
@@ -3846,7 +3847,9 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
}
else
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+ {
argSlots += size;
+ }
} // end foreach argument loop
if (!lateArgsComputed)
@@ -4046,8 +4049,8 @@ void Compiler::fgMorphSystemVStructArgs(GenTreeCall* call, bool hasStructArgumen
continue;
}
- // If already LDOBJ it is set properly already.
- if (arg->OperGet() == GT_LDOBJ)
+        // If the arg is already an OBJ, it has already been set up properly.
+ if (arg->OperGet() == GT_OBJ)
{
assert(!fgEntryPtr->structDesc.passedInRegisters);
continue;
@@ -4119,8 +4122,8 @@ void Compiler::fgMorphSystemVStructArgs(GenTreeCall* call, bool hasStructArgumen
assert(arg->OperGet() == GT_ADDR || arg->OperGet() == GT_LCL_VAR_ADDR);
- // Ldobj the temp to use it as a call argument
- arg = new (this, GT_LDOBJ) GenTreeLdObj(originalType, arg, lvaGetStruct(lclCommon->gtLclNum));
+ // Create an Obj of the temp to use it as a call argument.
+ arg = new (this, GT_OBJ) GenTreeObj(originalType, arg, lvaGetStruct(lclCommon->gtLclNum));
arg->gtFlags |= GTF_EXCEPT;
flagsSummary |= GTF_EXCEPT;
}
@@ -4184,7 +4187,7 @@ Compiler::fgMakeOutgoingStructArgCopy(GenTreeCall* call,
// See if we need to insert a copy at all
// Case 1: don't need a copy if it is the last use of a local. We can't determine that all of the time
// but if there is only one use and no loops, the use must be last.
- if (argx->gtOper == GT_LDOBJ)
+ if (argx->gtOper == GT_OBJ)
{
GenTree* lcl = argx->gtOp.gtOp1;
if (lcl->OperIsLocal())
@@ -4264,7 +4267,7 @@ Compiler::fgMakeOutgoingStructArgCopy(GenTreeCall* call,
lvaTable[tmp].incRefCnts(compCurBB->getBBWeight(this), this);
GenTreePtr src;
- if (argx->gtOper == GT_LDOBJ)
+ if (argx->gtOper == GT_OBJ)
{
src = argx->gtOp.gtOp1;
}
@@ -5207,7 +5210,7 @@ GenTreePtr Compiler::fgMorphField(GenTreePtr tree, MorphAddrContext* ma
We want to make it like this (when fldOffset is <= MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT):
+--------------------+
- | GT_IND | tree
+ | GT_IND/GT_OBJ | tree
+---------+----------+
|
|
@@ -5228,7 +5231,7 @@ GenTreePtr Compiler::fgMorphField(GenTreePtr tree, MorphAddrContext* ma
+--------------------+
- | GT_IND | tree
+ | GT_IND/GT_OBJ | tree
+----------+---------+
|
+----------+---------+
@@ -5835,20 +5838,19 @@ bool Compiler::fgCanFastTailCall(GenTreeCall* callee)
if (varTypeIsStruct(argx))
{
- // GT_LDOBJ may be a chile of a GT_COMMA. Skip over comma opers.
+ // Actual arg may be a child of a GT_COMMA. Skip over comma opers.
while (argx->gtOper == GT_COMMA)
{
argx = argx->gtOp.gtOp2;
}
- // Get the size of the struct and see if it is 1, 2, 4 or 8 bytes in size
- // For Amd64-Unix the call below checks to see if the struct is register passable.
- if (argx->OperGet() == GT_LDOBJ)
+ // Get the size of the struct and see if it is register passable.
+ if (argx->OperGet() == GT_OBJ)
{
#ifdef _TARGET_AMD64_
unsigned typeSize = 0;
- hasMultiByteArgs = !VarTypeIsMultiByteAndCanEnreg(argx->TypeGet(), argx->gtLdObj.gtClass, &typeSize, false);
+ hasMultiByteArgs = !VarTypeIsMultiByteAndCanEnreg(argx->TypeGet(), argx->gtObj.gtClass, &typeSize, false);
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
// On System V the args could be a 2 eightbyte struct that is passed in two registers.
@@ -7954,7 +7956,7 @@ GenTreePtr Compiler::fgMorphCopyBlock(GenTreePtr tree)
}
}
- // Check to see if we a required to do a copy block because the struct contains holes
+ // Check to see if we are required to do a copy block because the struct contains holes
// and either the src or dest is externally visible
//
bool requiresCopyBlock = false;
@@ -9514,7 +9516,7 @@ NO_MUL_64RSLT:
break;
case GT_IND:
case GT_INITBLK:
- case GT_LDOBJ:
+ case GT_OBJ:
subMac1 = &subIndMac1;
break;
default:
@@ -15640,10 +15642,17 @@ bool Compiler::fgMorphImplicitByRefArgs(GenTreePtr tree, fgWalkData* fgWalkPre)
else
{
// Change X into *X
+ // First, save the original type, then change the tree type to TYP_BYREF (otherwise we
+ // will get an assert when we try to clone the lclVar node because the lclVar is now TYP_BYREF
+ // and the types have to match). The reason we clone the lclVar is that we don't pass a
+            // possibly modified tree back to the caller, so we modify the original lclVar node in-place
+ // to the GT_IND.
var_types structType = tree->gtType;
lclVarTree = gtClone(tree);
+ // Now, set the types appropriately.
lclVarTree->gtType = TYP_BYREF;
tree->gtType = structType;
+ // Now, "insert" the GT_IND by changing the oper of the original node and setting its op1.
tree->SetOper(GT_IND);
tree->gtOp.gtOp1 = lclVarTree;
// TODO-CQ: If the VM ever stops violating the ABI and passing heap references
@@ -15821,7 +15830,7 @@ Compiler::fgWalkResult Compiler::fgMarkAddrTakenLocalsPreCB(GenTreePtr* pTr
switch (tree->gtOper)
{
case GT_IND:
- case GT_LDOBJ:
+ case GT_OBJ:
if (axc != AXC_Addr)
{
axcStack->Push(AXC_Ind);
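
One rule the fgMorphArgs changes above preserve: a struct argument's size is rounded up to pointer granularity, and when the rounded size exceeds the declared size, the value is first copied to a temp so the wider OBJ load cannot read past the end of the value. A minimal sketch of that check, assuming the host pointer size stands in for TARGET_POINTER_SIZE:

    #include <cstdio>
    #include <initializer_list>

    unsigned roundUpToPointerSize(unsigned size)
    {
        const unsigned ptrSize = sizeof(void*);
        return (size + ptrSize - 1) & ~(ptrSize - 1);
    }

    // Mirrors the "roundupSize > originalSize" test: a copy to a temp is needed
    // when loading the rounded-up size would overrun the original struct.
    bool needsCopyBlkTemp(unsigned structSize)
    {
        return roundUpToPointerSize(structSize) > structSize;
    }

    int main()
    {
        for (unsigned size : { 12u, 16u, 20u, 24u })
        {
            printf("struct of %2u bytes: %s\n", size,
                   needsCopyBlkTemp(size) ? "copy to temp first" : "load directly");
        }
        return 0;
    }
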
diff --git a/src/jit/rationalize.cpp b/src/jit/rationalize.cpp
index f7fad3e424..2eebb9804e 100644
--- a/src/jit/rationalize.cpp
+++ b/src/jit/rationalize.cpp
@@ -1538,10 +1538,10 @@ void Rationalizer::RewriteCopyBlk(GenTreePtr* ppTree, Compiler::fgWalkData* data
#endif // FEATURE_SIMD
}
-// Rewrite GT_LDOBJ of SIMD Vector as GT_IND(GT_LEA(ldobj.op1)) of a SIMD type.
+// Rewrite GT_OBJ of SIMD Vector as GT_IND(GT_LEA(obj.op1)) of a SIMD type.
//
// Arguments:
-// ppTree - A pointer-to-a-pointer for the GT_LDOBJ
+// ppTree - A pointer-to-a-pointer for the GT_OBJ
// fgWalkData - A pointer to tree walk data providing the context
//
// Return Value:
@@ -1550,16 +1550,16 @@ void Rationalizer::RewriteCopyBlk(GenTreePtr* ppTree, Compiler::fgWalkData* data
// TODO-Cleanup: Once SIMD types are plumbed through the frontend, this will no longer
// be required.
//
-void Rationalizer::RewriteLdObj(GenTreePtr* ppTree, Compiler::fgWalkData* data)
+void Rationalizer::RewriteObj(GenTreePtr* ppTree, Compiler::fgWalkData* data)
{
#ifdef FEATURE_SIMD
Compiler* comp = data->compiler;
- GenTreeLdObj* ldObj = (*ppTree)->AsLdObj();
+ GenTreeObj* obj = (*ppTree)->AsObj();
- // For UNIX struct passing, we can have LdObj nodes for arguments.
+ // For UNIX struct passing, we can have Obj nodes for arguments.
// For other cases, we should never see a non-SIMD type here.
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
- if (!varTypeIsSIMD(ldObj))
+ if (!varTypeIsSIMD(obj))
{
return;
}
@@ -1567,49 +1567,49 @@ void Rationalizer::RewriteLdObj(GenTreePtr* ppTree, Compiler::fgWalkData* data)
// Should come here only if featureSIMD is enabled
noway_assert(comp->featureSIMD);
// We should only call this with a SIMD type.
- noway_assert(varTypeIsSIMD(ldObj));
- var_types simdType = ldObj->TypeGet();
+ noway_assert(varTypeIsSIMD(obj));
+ var_types simdType = obj->TypeGet();
- // If the operand of ldobj is a GT_ADDR(GT_LCL_VAR) and LclVar is known to be a SIMD type,
- // replace ldobj by GT_LCL_VAR.
- GenTree* srcAddr = ldObj->gtGetOp1();
+ // If the operand of obj is a GT_ADDR(GT_LCL_VAR) and LclVar is known to be a SIMD type,
+ // replace obj by GT_LCL_VAR.
+ GenTree* srcAddr = obj->gtGetOp1();
if (srcAddr->OperGet() == GT_ADDR && comp->isSIMDTypeLocal(srcAddr->gtGetOp1()))
{
GenTree* src = srcAddr->gtGetOp1();
comp->fgSnipInnerNode(srcAddr);
- // It is possible for the ldobj to be the last node in the tree, if its result is
+ // It is possible for the obj to be the last node in the tree, if its result is
// not actually stored anywhere and is not eliminated.
// This can happen with an unused SIMD expression involving a localVar or temporary value,
// where the SIMD expression is returning a non-SIMD value, and the expression is sufficiently
// complex (e.g. a call to vector * scalar which is inlined but not an intrinsic).
- // The ldobj of the localVar is not eliminated, because it involves an indirection,
- // and therefore appears potentially unsafe to eliminate. However, when we transform the ldobj into
+ // The obj of the localVar is not eliminated, because it involves an indirection,
+ // and therefore appears potentially unsafe to eliminate. However, when we transform the obj into
// a plain localVar during the Rationalizer, we need to correctly handle the case where it has
// no parent.
// This happens, for example, with this source code:
// Vector4.Dot(default(Vector4) * 2f, Vector4.One);
- if (ldObj->gtNext == nullptr)
+ if (obj->gtNext == nullptr)
{
SplitData *tmpState = (SplitData *) data->pCallbackData;
- comp->fgSnipNode(tmpState->root->AsStmt(), ldObj);
+ comp->fgSnipNode(tmpState->root->AsStmt(), obj);
}
else
{
- comp->fgSnipInnerNode(ldObj);
+ comp->fgSnipInnerNode(obj);
}
- comp->fgFixupIfCallArg(data->parentStack, ldObj, src);
+ comp->fgFixupIfCallArg(data->parentStack, obj, src);
src->gtType = simdType;
*ppTree = src;
}
else
{
- ldObj->SetOper(GT_IND);
- ldObj->gtType = simdType;
+ obj->SetOper(GT_IND);
+ obj->gtType = simdType;
}
#else
// we should never reach without feature SIMD
- assert(!"Unexpected Ldobj during rationalization\n");
+ assert(!"Unexpected obj during rationalization\n");
unreached();
#endif
}
@@ -1979,8 +1979,8 @@ Compiler::fgWalkResult Rationalizer::SimpleTransformHelper(GenTree **ppTree, Com
RewriteCopyBlk(ppTree, data);
break;
- case GT_LDOBJ:
- RewriteLdObj(ppTree, data);
+ case GT_OBJ:
+ RewriteObj(ppTree, data);
break;
case GT_LCL_FLD:
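
RewriteObj above collapses OBJ(ADDR(simdLocal)) to the local itself and otherwise turns the OBJ into a plain indirection of SIMD type. A toy version of that rewrite, with made-up node types rather than the real GenTree API:

    #include <cassert>

    enum class Op { LclVar, Addr, Obj, Ind };

    struct ToyNode
    {
        Op       op;
        ToyNode* child;
        bool     isSimdLocal;
    };

    ToyNode* rewriteObj(ToyNode* obj)
    {
        assert(obj->op == Op::Obj);
        ToyNode* srcAddr = obj->child;                       // the address operand
        if (srcAddr->op == Op::Addr && srcAddr->child->isSimdLocal)
        {
            return srcAddr->child;                           // OBJ(ADDR(local)) => local
        }
        obj->op = Op::Ind;                                   // otherwise: ordinary indirection
        return obj;
    }

    int main()
    {
        ToyNode local = { Op::LclVar, nullptr, true };
        ToyNode addr  = { Op::Addr, &local, false };
        ToyNode obj   = { Op::Obj, &addr, false };
        assert(rewriteObj(&obj) == &local);
        return 0;
    }
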
diff --git a/src/jit/rationalize.h b/src/jit/rationalize.h
index f5e142e596..90f39c32d0 100644
--- a/src/jit/rationalize.h
+++ b/src/jit/rationalize.h
@@ -168,7 +168,7 @@ private:
bool BreakFirstLevelQuestions (BasicBlock* block, GenTree* tree);
// SIMD related transformations
- static void RewriteLdObj(GenTreePtr* ppTree, Compiler::fgWalkData* data);
+ static void RewriteObj(GenTreePtr* ppTree, Compiler::fgWalkData* data);
static void RewriteCopyBlk(GenTreePtr* ppTree, Compiler::fgWalkData* data);
static void RewriteInitBlk(GenTreePtr* ppTree, Compiler::fgWalkData* data);
diff --git a/src/jit/regalloc.cpp b/src/jit/regalloc.cpp
index 739478c3f7..1675f2c018 100644
--- a/src/jit/regalloc.cpp
+++ b/src/jit/regalloc.cpp
@@ -4311,7 +4311,7 @@ HANDLE_SHIFT_COUNT:
goto RETURN_CHECK;
}
- case GT_LDOBJ:
+ case GT_OBJ:
{
#ifdef _TARGET_ARM_
if (predictReg <= PREDICT_REG)
@@ -4328,16 +4328,16 @@ HANDLE_SHIFT_COUNT:
}
#ifdef _TARGET_ARM_
- unsigned objSize = info.compCompHnd->getClassSize(tree->gtLdObj.gtClass);
+ unsigned objSize = info.compCompHnd->getClassSize(tree->gtObj.gtClass);
regMaskTP preferReg = rpPredictRegMask(predictReg, TYP_I_IMPL);
// If it has one bit set, and that's an arg reg...
if (preferReg != RBM_NONE && genMaxOneBit(preferReg) && ((preferReg & RBM_ARG_REGS) != 0))
{
- // We are passing the ldObj in the argument registers
+ // We are passing the 'obj' in the argument registers
//
regNumber rn = genRegNumFromMask(preferReg);
- // Add the registers used to pass the ldObj to regMask.
+ // Add the registers used to pass the 'obj' to regMask.
for (unsigned i = 0; i < objSize/4; i++)
{
if (rn == MAX_REG_ARG)
@@ -4349,8 +4349,8 @@ HANDLE_SHIFT_COUNT:
}
else
{
- // We are passing the ldObj in the outgoing arg space
- // We will need one register to load into unless the ldObj size is 4 or less.
+ // We are passing the 'obj' in the outgoing arg space
+ // We will need one register to load into unless the 'obj' size is 4 or less.
//
if (objSize > 4)
{
@@ -4617,22 +4617,22 @@ HANDLE_SHIFT_COUNT:
GenTreePtr argx = args;
GenTreePtr lclVarTree = NULL;
- /* The GT_LDOBJ may be be a child of a GT_COMMA */
+        /* The GT_OBJ may be a child of a GT_COMMA */
while (argx->gtOper == GT_COMMA)
{
argx = argx->gtOp.gtOp2;
}
unsigned originalSize = 0;
- if (argx->gtOper == GT_LDOBJ)
+ if (argx->gtOper == GT_OBJ)
{
- originalSize = info.compCompHnd->getClassSize(argx->gtLdObj.gtClass);
+ originalSize = info.compCompHnd->getClassSize(argx->gtObj.gtClass);
// Is it the address of a promoted struct local?
- if (argx->gtLdObj.gtOp1->gtOper == GT_ADDR &&
- argx->gtLdObj.gtOp1->gtOp.gtOp1->gtOper == GT_LCL_VAR)
+ if (argx->gtObj.gtOp1->gtOper == GT_ADDR &&
+ argx->gtObj.gtOp1->gtOp.gtOp1->gtOper == GT_LCL_VAR)
{
- lclVarTree = argx->gtLdObj.gtOp1->gtOp.gtOp1;
+ lclVarTree = argx->gtObj.gtOp1->gtOp.gtOp1;
LclVarDsc* varDsc = &lvaTable[lclVarTree->gtLclVarCommon.gtLclNum];
if (varDsc->lvPromoted)
promotedStructLocal = varDsc;
@@ -4768,9 +4768,9 @@ HANDLE_SHIFT_COUNT:
tmpMask |= rpPredictTreeRegUse(args, argPredictReg, lockedRegs | regArgMask, RBM_LASTUSE);
}
- // We mark LDOBJ(ADDR(LOCAL)) with GTF_VAR_DEATH since the local is required to live
- // for the duration of the LDOBJ.
- if (args->OperGet() == GT_LDOBJ && (args->gtFlags & GTF_VAR_DEATH))
+ // We mark OBJ(ADDR(LOCAL)) with GTF_VAR_DEATH since the local is required to live
+ // for the duration of the OBJ.
+ if (args->OperGet() == GT_OBJ && (args->gtFlags & GTF_VAR_DEATH))
{
GenTreePtr lclVarTree = fgIsIndirOfAddrOfLocal(args);
assert(lclVarTree != NULL); // Or else would not be marked with GTF_VAR_DEATH.
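
The ARM path in the regalloc.cpp hunk above claims one argument register per 4-byte slot of the struct until the register limit is hit, after which the remainder goes to the outgoing argument space. A rough sketch of that accounting (the register names and the limit of four are assumptions for illustration, not taken from the source):

    #include <cstdio>

    const unsigned kMaxRegArg = 4;   // assume r0-r3 carry integer arguments
    const unsigned kSlotSize  = 4;   // bytes per argument register

    // How many argument registers a struct of 'objSize' bytes occupies when its
    // first slot is assigned register index 'firstReg'.
    unsigned argRegsUsedByStruct(unsigned objSize, unsigned firstReg)
    {
        unsigned slots = (objSize + kSlotSize - 1) / kSlotSize;
        unsigned avail = (firstReg < kMaxRegArg) ? (kMaxRegArg - firstReg) : 0;
        return (slots < avail) ? slots : avail;
    }

    int main()
    {
        printf("12-byte struct starting at r1: %u regs\n", argRegsUsedByStruct(12, 1));
        printf("24-byte struct starting at r2: %u regs\n", argRegsUsedByStruct(24, 2));
        return 0;
    }
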
diff --git a/src/jit/simd.cpp b/src/jit/simd.cpp
index a3055d27d2..122972a563 100644
--- a/src/jit/simd.cpp
+++ b/src/jit/simd.cpp
@@ -656,7 +656,7 @@ GenTreePtr Compiler::impSIMDPopStack(var_types type, bool expectAddr)
bool isParam = false;
// If we have a ldobj of a SIMD local we need to transform it.
- if (tree->OperGet() == GT_LDOBJ)
+ if (tree->OperGet() == GT_OBJ)
{
GenTree* addr = tree->gtOp.gtOp1;
if ((addr->OperGet() == GT_ADDR) && isSIMDTypeLocal(addr->gtOp.gtOp1))
@@ -2240,10 +2240,6 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
// op2 is the second operand
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType, instMethod);
-
- assert(op1->TypeGet() == simdType);
- assert(op2->TypeGet() == simdType);
-
simdTree = gtNewSIMDNode(genActualType(callType), op1, op2, SIMDIntrinsicOpInEquality, baseType, size);
retVal = simdTree;
}
@@ -2258,9 +2254,6 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType, instMethod);
- assert(op1->TypeGet() == simdType);
- assert(op2->TypeGet() == simdType);
-
SIMDIntrinsicID intrinsicID = impSIMDRelOp(simdIntrinsicID, clsHnd, size, &baseType, &op1, &op2);
simdTree = gtNewSIMDNode(genActualType(callType), op1, op2, intrinsicID, baseType, size);
retVal = simdTree;
diff --git a/src/jit/simdcodegenxarch.cpp b/src/jit/simdcodegenxarch.cpp
index f419055525..df075f5ca9 100644
--- a/src/jit/simdcodegenxarch.cpp
+++ b/src/jit/simdcodegenxarch.cpp
@@ -537,8 +537,7 @@ CodeGen::genSIMDScalarMove(var_types type, regNumber targetReg, regNumber srcReg
}
else
{
- instruction ins = getOpForSIMDIntrinsic(SIMDIntrinsicBitwiseXor, type);
- inst_RV_RV(ins, targetReg, targetReg, targetType, emitTypeSize(targetType));
+ genSIMDZero(targetType, TYP_FLOAT, targetReg);
inst_RV_RV(ins_Store(type), targetReg, srcReg);
}
break;
@@ -556,6 +555,14 @@ CodeGen::genSIMDScalarMove(var_types type, regNumber targetReg, regNumber srcReg
}
}
+void
+CodeGen::genSIMDZero(var_types targetType, var_types baseType, regNumber targetReg)
+{
+ // pxor reg, reg
+ instruction ins = getOpForSIMDIntrinsic(SIMDIntrinsicBitwiseXor, baseType);
+ inst_RV_RV(ins, targetReg, targetReg, targetType, emitActualTypeSize(targetType));
+}
+
//------------------------------------------------------------------------
// genSIMDIntrinsicInit: Generate code for SIMD Intrinsic Initialize.
//
@@ -586,9 +593,7 @@ CodeGen::genSIMDIntrinsicInit(GenTreeSIMD* simdNode)
{
if (op1->IsZero())
{
- // pxor reg, reg
- ins = getOpForSIMDIntrinsic(SIMDIntrinsicBitwiseXor, baseType);
- inst_RV_RV(ins, targetReg, targetReg, targetType, emitActualTypeSize(targetType));
+ genSIMDZero(targetType, baseType, targetReg);
}
else if ((baseType == TYP_INT && op1->IsCnsIntOrI() && op1->AsIntConCommon()->IconValue() == 0xffffffff) ||
(baseType == TYP_LONG && op1->IsCnsIntOrI() && op1->AsIntConCommon()->IconValue() == 0xffffffffffffffffLL))
@@ -732,12 +737,11 @@ CodeGen::genSIMDIntrinsicInitN(GenTreeSIMD* simdNode)
assert(genCountBits(simdNode->gtRsvdRegs) == 1);
regNumber vectorReg = genRegNumFromMask(simdNode->gtRsvdRegs);
- // Zero out vectorReg if we are constructing a vector whose size is not equal to the SIMD vector size.
+    // Zero out vectorReg if we are constructing a vector whose size is not equal to the targetType vector size.
// For example in case of Vector4f we don't need to zero when using SSE2.
if (compiler->isSubRegisterSIMDType(simdNode))
{
- instruction ins = getOpForSIMDIntrinsic(SIMDIntrinsicBitwiseXor, baseType);
- inst_RV_RV(ins, vectorReg, vectorReg, targetType, emitActualTypeSize(targetType));
+ genSIMDZero(targetType, baseType, vectorReg);
}
unsigned int baseTypeSize = genTypeSize(baseType);
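
genSIMDZero, factored out above, emits "pxor reg, reg": XORing a vector register with itself yields zero in every lane regardless of its prior contents. The same idiom expressed with SSE2 intrinsics, purely as a user-level illustration (the JIT emits the instruction directly):

    #include <emmintrin.h>
    #include <cstdio>

    static __m128i zeroLike(__m128i v)
    {
        return _mm_xor_si128(v, v);   // v ^ v == 0 for every lane, like pxor reg, reg
    }

    int main()
    {
        __m128i v     = _mm_set1_epi32(0x12345678);
        __m128i z     = zeroLike(v);
        int     lane0 = _mm_cvtsi128_si32(z);
        printf("lane0 after pxor-style zeroing: %d\n", lane0);
        return 0;
    }
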
diff --git a/src/jit/stackfp.cpp b/src/jit/stackfp.cpp
index 2c0150d520..eeeadbb278 100644
--- a/src/jit/stackfp.cpp
+++ b/src/jit/stackfp.cpp
@@ -4308,99 +4308,6 @@ void Compiler::raEnregisterVarsPostPassStackFP ()
{
raSetRegLclBirthDeath(tree, lastlife, false);
}
- else if (tree->gtOper == GT_LDOBJ)
- {
- GenTreePtr lclVarTree;
-
- if (impIsAddressInLocal(tree->gtOp.gtOp1, &lclVarTree))
- {
- assert(lclVarTree->gtOper == GT_LCL_VAR);
- assert(lclVarTree->gtType == TYP_STRUCT);
-
- LclVarDsc * varDsc = lvaTable + lclVarTree->gtLclVarCommon.gtLclNum;
-
-#ifdef DEBUG
- if (verbose)
- {
- gtDispTree(tree);
- }
-#endif
-
- if (varDsc->lvContainsFloatingFields)
- {
- assert(varDsc->lvPromoted);
-
- tree->gtLdObj.gtFldTreeList = new (this, CMK_ASTNode) GenTree*[MAX_NumOfFieldsInPromotableStruct];
-#ifdef DEBUG
- if (verbose && 0)
- {
- printf("gtFldTreeList=");
- Compiler::printTreeID(*(tree->gtLdObj.gtFldTreeList));
- printf("\n");
- }
-#endif
-
- VARSET_TP* deadVarBits = NULL;
- if (lclVarTree->TypeGet() == TYP_STRUCT)
- GetPromotedStructDeathVars()->Lookup(lclVarTree, &deadVarBits);
-
- // Re-thread the floating nodes into the gtPrev/gtNext
- GenTreePtr prev = lclVarTree->gtPrev;
- for (unsigned index=0;
- index<varDsc->lvFieldCnt;
- ++index)
- {
- LclVarDsc * fieldVarDsc = &lvaTable[varDsc->lvFieldLclStart+index];
-
- if (varTypeIsFloating((var_types)fieldVarDsc->lvType))
- {
- GenTreePtr fieldTree = gtNewLclvNode(varDsc->lvFieldLclStart+index,
- lvaTable[varDsc->lvFieldLclStart+index].TypeGet());
-
- raSetRegLclBirthDeath(fieldTree, lastlife, true);
- if ((tree->gtFlags & GTF_VAR_DEATH) != 0)
- {
- if (fieldVarDsc->lvTracked &&
- (deadVarBits == NULL || VarSetOps::IsMember(this, *deadVarBits, fieldVarDsc->lvVarIndex)))
- {
- fieldTree->gtFlags |= GTF_VAR_DEATH;
- }
- }
-
- // Note that assignments to promoted struct fields are always "exploded", so there
- // is no dual situation to the considerations above for the GTF_VAR_DEF case.
- if ((lclVarTree->gtFlags & GTF_VAR_DEF) != 0) fieldTree->gtFlags |= GTF_VAR_DEF;
-
- tree->gtLdObj.gtFldTreeList[index] = fieldTree;
- fieldTree->gtPrev = prev;
- if (prev != NULL) prev->gtNext = fieldTree;
- else
- {
- assert (stmt->gtStmt.gtStmtList == lclVarTree);
- stmt->gtStmt.gtStmtList = fieldTree;
- }
- prev = fieldTree;
- }
- else
- {
- tree->gtLdObj.gtFldTreeList[index] = NULL;
- }
- }
- lclVarTree->gtPrev = prev;
- prev->gtNext = lclVarTree;
- }
- else
- {
-#ifdef DEBUG
- if (verbose)
- {
- printf("V%02u does NOT contains floating field.\n", lclVarTree->gtLclVarCommon.gtLclNum);
- }
-#endif
- }
- }
- }
-
}
}
assert(VarSetOps::Equal(this, lastlife, block->bbLiveOut));
diff --git a/src/jit/valuenum.cpp b/src/jit/valuenum.cpp
index 16183ac9c6..58c99d07bf 100644
--- a/src/jit/valuenum.cpp
+++ b/src/jit/valuenum.cpp
@@ -3567,7 +3567,7 @@ static genTreeOps genTreeOpsIllegalAsVNFunc[] = {
GT_COPYOBJ, // A kind of assignment.
GT_ADDR,
GT_ARR_BOUNDS_CHECK,
- GT_LDOBJ, // Heap memory.
+ GT_OBJ, // May reference heap memory.
// These control-flow operations need no values.
GT_JTRUE,