Diffstat (limited to 'src/jit/gentree.h')
-rw-r--r--   src/jit/gentree.h   3468
1 file changed, 3468 insertions, 0 deletions
diff --git a/src/jit/gentree.h b/src/jit/gentree.h
new file mode 100644
index 0000000000..1c631179c9
--- /dev/null
+++ b/src/jit/gentree.h
@@ -0,0 +1,3468 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+XX XX
+XX GenTree XX
+XX XX
+XX This is the node in the semantic tree graph. It represents the operation XX
+XX corresponding to the node, and other information during code-gen. XX
+XX XX
+XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+*/
+
+/*****************************************************************************/
+#ifndef _GENTREE_H_
+#define _GENTREE_H_
+/*****************************************************************************/
+
+#include "vartype.h" // For "var_types"
+#include "target.h" // For "regNumber"
+#include "ssaconfig.h" // For "SsaConfig::RESERVED_SSA_NUM"
+#include "reglist.h"
+#include "valuenumtype.h"
+#include "simplerhash.h"
+#include "nodeinfo.h"
+#include "simd.h"
+
+// Debugging GenTree is much easier if we add a magic virtual function so the debugger can figure out the dynamic type of a node.
+// This is enabled by default in DEBUG. To enable it in RET builds (temporarily!), you need to change the build to define DEBUGGABLE_GENTREE=1,
+// as well as pass /OPT:NOICF to the linker (or else all the vtables get merged, making the debugging value they supply useless).
+// See protojit.nativeproj for a commented example of setting the build flags correctly.
+#ifndef DEBUGGABLE_GENTREE
+#ifdef DEBUG
+#define DEBUGGABLE_GENTREE 1
+#else // !DEBUG
+#define DEBUGGABLE_GENTREE 0
+#endif // !DEBUG
+#endif // !DEBUGGABLE_GENTREE
+
+/*****************************************************************************/
+
+DECLARE_TYPED_ENUM(genTreeOps,BYTE)
+{
+ #define GTNODE(en,sn,cm,ok) en,
+ #include "gtlist.h"
+
+ GT_COUNT,
+
+    // GT_CNS_NATIVELONG is the gtOper symbol for GT_CNS_LNG or GT_CNS_INT, depending on the target.
+    // For the 64-bit targets we will only use GT_CNS_INT as it is used to represent all the possible sizes.
+    // For the 32-bit targets we use GT_CNS_LNG to hold a 64-bit integer constant and GT_CNS_INT for all others.
+    // In the future when we retarget the JIT for x86 we should consider eliminating GT_CNS_LNG.
+ //
+#ifdef _TARGET_64BIT_
+ GT_CNS_NATIVELONG = GT_CNS_INT,
+#else
+ GT_CNS_NATIVELONG = GT_CNS_LNG,
+#endif
+}
+END_DECLARE_TYPED_ENUM(genTreeOps,BYTE)
+
+/*****************************************************************************
+ *
+ * The following enum defines a set of bit flags that can be used
+ * to classify expression tree nodes. Note that some operators will
+ * have more than one bit set, as follows:
+ *
+ * GTK_CONST implies GTK_LEAF
+ * GTK_RELOP implies GTK_BINOP
+ * GTK_LOGOP implies GTK_BINOP
+ */
+
+enum genTreeKinds
+{
+ GTK_SPECIAL = 0x0000, // unclassified operator (special handling reqd)
+
+ GTK_CONST = 0x0001, // constant operator
+ GTK_LEAF = 0x0002, // leaf operator
+ GTK_UNOP = 0x0004, // unary operator
+ GTK_BINOP = 0x0008, // binary operator
+ GTK_RELOP = 0x0010, // comparison operator
+ GTK_LOGOP = 0x0020, // logical operator
+ GTK_ASGOP = 0x0040, // assignment operator
+
+ GTK_KINDMASK= 0x007F, // operator kind mask
+
+ GTK_COMMUTE = 0x0080, // commutative operator
+
+    GTK_EXOP    = 0x0100,   // Indicates an oper for a node type that extends GenTreeOp (or GenTreeUnOp)
+                            // by adding non-node fields to the unary or binary operator.
+
+ GTK_LOCAL = 0x0200, // is a local access (load, store, phi)
+
+ /* Define composite value(s) */
+
+ GTK_SMPOP = (GTK_UNOP|GTK_BINOP|GTK_RELOP|GTK_LOGOP)
+};
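+
+// For illustration only (not part of this header): a hedged sketch of how the kind bits
+// compose, using GenTree::OperKind() declared further below. Since GTK_CONST implies
+// GTK_LEAF, a GT_CNS_INT node reports both bits:
+//
+//     unsigned kind = GenTree::OperKind(GT_CNS_INT);
+//     assert((kind & GTK_CONST) != 0);
+//     assert((kind & GTK_LEAF)  != 0);   // implied by GTK_CONST
+//     assert((kind & GTK_SMPOP) == 0);   // a constant is not a unary/binary operator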
+
+/*****************************************************************************/
+
+#define SMALL_TREE_NODES 1
+
+/*****************************************************************************/
+
+DECLARE_TYPED_ENUM(gtCallTypes,BYTE)
+{
+ CT_USER_FUNC, // User function
+ CT_HELPER, // Jit-helper
+ CT_INDIRECT, // Indirect call
+
+ CT_COUNT // fake entry (must be last)
+}
+END_DECLARE_TYPED_ENUM(gtCallTypes,BYTE)
+
+
+/*****************************************************************************/
+
+struct BasicBlock;
+
+struct InlineCandidateInfo;
+
+/*****************************************************************************/
+
+// GT_FIELD nodes will be lowered into more "code-gen-able" representations, like
+// GT_IND's of addresses, or GT_LCL_FLD nodes. We'd like to preserve the more abstract
+// information, and will therefore annotate such lowered nodes with FieldSeq's. A FieldSeq
+// represents a (possibly) empty sequence of fields. The fields are in the order
+// in which they are dereferenced. The first field may be an object field or a struct field;
+// all subsequent fields must be struct fields.
+struct FieldSeqNode
+{
+ CORINFO_FIELD_HANDLE m_fieldHnd;
+ FieldSeqNode* m_next;
+
+ FieldSeqNode(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode* next) : m_fieldHnd(fieldHnd), m_next(next) {}
+
+ // returns true when this is the pseudo #FirstElem field sequence
+ bool IsFirstElemFieldSeq();
+
+ // returns true when this is the pseudo #ConstantIndex field sequence
+ bool IsConstantIndexFieldSeq();
+
+    // returns true when this is the pseudo #FirstElem field sequence or the pseudo #ConstantIndex field sequence
+ bool IsPseudoField();
+
+ // Make sure this provides methods that allow it to be used as a KeyFuncs type in SimplerHash.
+ static int GetHashCode(FieldSeqNode fsn)
+ {
+        // Hash the two pointers; cast through intptr_t, since reinterpret_cast directly to int is ill-formed on 64-bit hosts.
+        return static_cast<int>(reinterpret_cast<intptr_t>(fsn.m_fieldHnd)) ^ static_cast<int>(reinterpret_cast<intptr_t>(fsn.m_next));
+ }
+
+ static bool Equals(FieldSeqNode fsn1, FieldSeqNode fsn2)
+ {
+ return fsn1.m_fieldHnd == fsn2.m_fieldHnd && fsn1.m_next == fsn2.m_next;
+ }
+};
+
+// This class canonicalizes field sequences.
+class FieldSeqStore
+{
+ typedef SimplerHashTable<FieldSeqNode, /*KeyFuncs*/FieldSeqNode, FieldSeqNode*, DefaultSimplerHashBehavior> FieldSeqNodeCanonMap;
+
+ IAllocator* m_alloc;
+ FieldSeqNodeCanonMap* m_canonMap;
+
+ static FieldSeqNode s_notAField; // No value, just exists to provide an address.
+
+ // Dummy variables to provide the addresses for the "pseudo field handle" statics below.
+ static int FirstElemPseudoFieldStruct;
+ static int ConstantIndexPseudoFieldStruct;
+
+public:
+ FieldSeqStore(IAllocator* alloc);
+
+ // Returns the (canonical in the store) singleton field sequence for the given handle.
+ FieldSeqNode* CreateSingleton(CORINFO_FIELD_HANDLE fieldHnd);
+
+ // This is a special distinguished FieldSeqNode indicating that a constant does *not*
+ // represent a valid field sequence. This is "infectious", in the sense that appending it
+ // (on either side) to any field sequence yields the "NotAField()" sequence.
+ static FieldSeqNode* NotAField() { return &s_notAField; }
+
+ // Returns the (canonical in the store) field sequence representing the concatenation of
+ // the sequences represented by "a" and "b". Assumes that "a" and "b" are canonical; that is,
+ // they are the results of CreateSingleton, NotAField, or Append calls. If either of the arguments
+ // are the "NotAField" value, so is the result.
+ FieldSeqNode* Append(FieldSeqNode* a, FieldSeqNode* b);
+
+ // We have a few "pseudo" field handles:
+
+ // This treats the constant offset of the first element of something as if it were a field.
+ // Works for method table offsets of boxed structs, or first elem offset of arrays/strings.
+ static CORINFO_FIELD_HANDLE FirstElemPseudoField;
+
+    // If there is a constant index, we make a pseudo field to correspond to the constant added to
+ // offset of the indexed field. This keeps the field sequence structure "normalized", especially in the
+ // case where the element type is a struct, so we might add a further struct field offset.
+ static CORINFO_FIELD_HANDLE ConstantIndexPseudoField;
+
+ static bool IsPseudoField(CORINFO_FIELD_HANDLE hnd)
+ {
+ return hnd == FirstElemPseudoField || hnd == ConstantIndexPseudoField;
+ }
+};
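+
+// For illustration only: a hedged sketch of how a field sequence might be built and
+// canonicalized, assuming a FieldSeqStore* named "store" and two field handles
+// "hndA"/"hndB" (hypothetical names):
+//
+//     FieldSeqNode* a  = store->CreateSingleton(hndA);
+//     FieldSeqNode* ab = store->Append(a, store->CreateSingleton(hndB));   // field A, then field B
+//     // NotAField() is "infectious": appending it on either side yields NotAField().
+//     assert(store->Append(ab, FieldSeqStore::NotAField()) == FieldSeqStore::NotAField());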
+
+
+
+/*****************************************************************************/
+
+typedef struct GenTree * GenTreePtr;
+struct GenTreeArgList;
+
+// Forward declarations of the subtypes
+#define GTSTRUCT_0(fn, en) struct GenTree##fn;
+#define GTSTRUCT_1(fn, en) struct GenTree##fn;
+#define GTSTRUCT_2(fn, en, en2) struct GenTree##fn;
+#define GTSTRUCT_3(fn, en, en2, en3) struct GenTree##fn;
+#define GTSTRUCT_4(fn, en, en2, en3, en4) struct GenTree##fn;
+#define GTSTRUCT_N(fn, ...) struct GenTree##fn;
+#include "gtstructs.h"
+
+/*****************************************************************************/
+
+#ifndef _WIN64
+#include <pshpack4.h>
+#endif
+
+struct GenTree
+{
+ // We use GT_STRUCT_0 only for the category of simple ops.
+#define GTSTRUCT_0(fn, en) GenTree##fn* As##fn() \
+ { \
+ assert(this->OperIsSimple()); \
+ return reinterpret_cast<GenTree##fn*>(this); \
+ } \
+ GenTree##fn& As##fn##Ref() { return *As##fn(); } \
+ __declspec(property(get=As##fn##Ref)) GenTree##fn& gt##fn;
+#define GTSTRUCT_1(fn, en) GenTree##fn* As##fn() \
+ { \
+ assert(this->gtOper == en); \
+ return reinterpret_cast<GenTree##fn*>(this); \
+ } \
+ GenTree##fn& As##fn##Ref() { return *As##fn(); } \
+ __declspec(property(get=As##fn##Ref)) GenTree##fn& gt##fn;
+#define GTSTRUCT_2(fn, en, en2) GenTree##fn* As##fn() \
+ { \
+ assert(this->gtOper == en || this->gtOper == en2); \
+ return reinterpret_cast<GenTree##fn*>(this); \
+ } \
+ GenTree##fn& As##fn##Ref() { return *As##fn(); } \
+ __declspec(property(get=As##fn##Ref)) GenTree##fn& gt##fn;
+#define GTSTRUCT_3(fn, en, en2, en3) GenTree##fn* As##fn() \
+ { \
+ assert(this->gtOper == en || this->gtOper == en2 || this->gtOper == en3); \
+ return reinterpret_cast<GenTree##fn*>(this); \
+ } \
+ GenTree##fn& As##fn##Ref() { return *As##fn(); } \
+ __declspec(property(get=As##fn##Ref)) GenTree##fn& gt##fn;
+
+#define GTSTRUCT_4(fn, en, en2, en3, en4) GenTree##fn* As##fn() \
+ { \
+ assert(this->gtOper == en || this->gtOper == en2 || this->gtOper == en3 || this->gtOper == en4); \
+ return reinterpret_cast<GenTree##fn*>(this); \
+ } \
+ GenTree##fn& As##fn##Ref() { return *As##fn(); } \
+ __declspec(property(get=As##fn##Ref)) GenTree##fn& gt##fn;
+
+#ifdef DEBUG
+// VC does not optimize out this loop in retail even though the value it computes is unused,
+// so we need a separate version for non-debug builds.
+#define GTSTRUCT_N(fn, ...) GenTree##fn* As##fn() \
+ { \
+ genTreeOps validOps[] = {__VA_ARGS__}; \
+ bool found = false; \
+ for (unsigned i=0; i<ArrLen(validOps); i++) { \
+ if (this->gtOper == validOps[i]) \
+ { \
+ found = true; \
+ break; \
+ } \
+ } \
+ assert(found); \
+ return reinterpret_cast<GenTree##fn*>(this); \
+ } \
+ GenTree##fn& As##fn##Ref() { return *As##fn(); } \
+ __declspec(property(get=As##fn##Ref)) GenTree##fn& gt##fn;
+#else
+#define GTSTRUCT_N(fn, ...) GenTree##fn* As##fn() \
+ { \
+ return reinterpret_cast<GenTree##fn*>(this); \
+ } \
+ GenTree##fn& As##fn##Ref() { return *As##fn(); } \
+ __declspec(property(get=As##fn##Ref)) GenTree##fn& gt##fn;
+#endif
+
+#include "gtstructs.h"
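+
+    // For illustration only: the macros above generate checked downcast accessors for each
+    // node subtype listed in gtstructs.h. A hedged sketch of typical usage, assuming a
+    // GenTree* named "tree" that is known to be a GT_CALL node:
+    //
+    //     GenTreeCall* call = tree->AsCall();   // asserts tree->gtOper == GT_CALL in DEBUG builds
+    //     tree->gtCall;                         // equivalent property-style access (by reference)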
+
+ genTreeOps gtOper; // enum subtype BYTE
+ var_types gtType; // enum subtype BYTE
+
+ genTreeOps OperGet() const { return gtOper; }
+ var_types TypeGet() const { return gtType; }
+
+#ifdef DEBUG
+ genTreeOps gtOperSave; // Only used to save gtOper when we destroy a node, to aid debugging.
+#endif
+
+#if FEATURE_ANYCSE
+
+#define NO_CSE (0)
+
+#define IS_CSE_INDEX(x) (x != 0)
+#define IS_CSE_USE(x) (x > 0)
+#define IS_CSE_DEF(x) (x < 0)
+#define GET_CSE_INDEX(x) ((x > 0) ? x : -x)
+#define TO_CSE_DEF(x) (-x)
+
+ signed char gtCSEnum; // 0 or the CSE index (negated if def)
+ // valid only for CSE expressions
+
+#endif // FEATURE_ANYCSE
+
+#if ASSERTION_PROP
+ unsigned char gtAssertionNum; // 0 or Assertion table index
+ // valid only for non-GT_STMT nodes
+
+ bool HasAssertion() const { return gtAssertionNum != 0; }
+ void ClearAssertion() { gtAssertionNum = 0; }
+
+ unsigned int GetAssertion() const { return gtAssertionNum; }
+ void SetAssertion(unsigned int value) { assert((unsigned char)value == value); gtAssertionNum = (unsigned char)value; }
+
+#endif
+
+#if FEATURE_STACK_FP_X87
+ unsigned char gtFPlvl; // x87 stack depth at this node
+ void gtCopyFPlvl(GenTree * other) { gtFPlvl = other->gtFPlvl; }
+ void gtSetFPlvl(unsigned level) { noway_assert(FitsIn<unsigned char>(level)); gtFPlvl = (unsigned char)level; }
+#else // FEATURE_STACK_FP_X87
+ void gtCopyFPlvl(GenTree * other) { }
+ void gtSetFPlvl(unsigned level) { }
+#endif // FEATURE_STACK_FP_X87
+
+ //
+ // Cost metrics on the node. Don't allow direct access to the variable for setting.
+ //
+
+public:
+
+#ifdef DEBUG
+ // You are not allowed to read the cost values before they have been set in gtSetEvalOrder().
+ // Keep track of whether the costs have been initialized, and assert if they are read before being initialized.
+ // Obviously, this information does need to be initialized when a node is created.
+ // This is public so the dumpers can see it.
+
+ bool gtCostsInitialized;
+#endif // DEBUG
+
+#define MAX_COST UCHAR_MAX
+#define IND_COST_EX 3 // execution cost for an indirection
+
+ __declspec(property(get=GetCostEx))
+ unsigned char gtCostEx; // estimate of expression execution cost
+
+ __declspec(property(get=GetCostSz))
+ unsigned char gtCostSz; // estimate of expression code size cost
+
+ unsigned char GetCostEx() const { assert(gtCostsInitialized); return _gtCostEx; }
+ unsigned char GetCostSz() const { assert(gtCostsInitialized); return _gtCostSz; }
+
+ // Set the costs. They are always both set at the same time.
+ // Don't use the "put" property: force calling this function, to make it more obvious in the few places
+ // that set the values.
+ // Note that costs are only set in gtSetEvalOrder() and its callees.
+ void SetCosts(unsigned costEx, unsigned costSz)
+ {
+ assert(costEx != (unsigned)-1); // looks bogus
+ assert(costSz != (unsigned)-1); // looks bogus
+ INDEBUG(gtCostsInitialized = true;)
+
+ _gtCostEx = (costEx > MAX_COST) ? MAX_COST : (unsigned char)costEx;
+ _gtCostSz = (costSz > MAX_COST) ? MAX_COST : (unsigned char)costSz;
+ }
+
+    // Optimized copy function, to avoid the SetCosts() function comparisons, and make it more clear that a node copy is happening.
+ void CopyCosts(const GenTree* const tree)
+ {
+ INDEBUG(gtCostsInitialized = tree->gtCostsInitialized;) // If the 'tree' costs aren't initialized, we'll hit an assert below.
+ _gtCostEx = tree->gtCostEx;
+ _gtCostSz = tree->gtCostSz;
+ }
+
+ // Same as CopyCosts, but avoids asserts if the costs we are copying have not been initialized.
+ // This is because the importer, for example, clones nodes, before these costs have been initialized.
+ // Note that we directly access the 'tree' costs, not going through the accessor functions (either
+ // directly or through the properties).
+ void CopyRawCosts(const GenTree* const tree)
+ {
+ INDEBUG(gtCostsInitialized = tree->gtCostsInitialized;)
+ _gtCostEx = tree->_gtCostEx;
+ _gtCostSz = tree->_gtCostSz;
+ }
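+
+    // For illustration only: a hedged sketch of how the cost setters above behave. Costs are
+    // clamped to MAX_COST, and (in DEBUG) must be set before they are read:
+    //
+    //     node->SetCosts(2, 1);          // gtCostEx == 2, gtCostSz == 1
+    //     node->SetCosts(1000, 1000);    // both clamp to MAX_COST (UCHAR_MAX)
+    //     clone->CopyCosts(node);        // copies both costs (asserts if node's costs are uninitialized)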
+
+private:
+
+ unsigned char _gtCostEx; // estimate of expression execution cost
+ unsigned char _gtCostSz; // estimate of expression code size cost
+
+ //
+ // Register or register pair number of the node.
+ //
+
+#ifdef DEBUG
+public:
+ enum genRegTag
+ {
+ GT_REGTAG_NONE, // Nothing has been assigned to _gtRegNum/_gtRegPair
+ GT_REGTAG_REG, // _gtRegNum has been assigned
+ GT_REGTAG_REGPAIR // _gtRegPair has been assigned
+ };
+ genRegTag GetRegTag() const
+ {
+ assert(gtRegTag == GT_REGTAG_NONE ||
+ gtRegTag == GT_REGTAG_REG ||
+ gtRegTag == GT_REGTAG_REGPAIR);
+ return gtRegTag;
+ }
+private:
+ genRegTag gtRegTag; // What is in _gtRegNum/_gtRegPair?
+#endif // DEBUG
+
+private:
+
+ union
+ {
+ // NOTE: After LSRA, one of these values may be valid even if GTF_REG_VAL is not set in gtFlags.
+ // They store the register assigned to the node. If a register is not assigned, _gtRegNum is set to REG_NA
+ // or _gtRegPair is set to REG_PAIR_NONE, depending on the node type.
+ regNumberSmall _gtRegNum; // which register the value is in
+ regPairNoSmall _gtRegPair; // which register pair the value is in
+ };
+
+public:
+
+ // The register number is stored in a small format (8 bits), but the getters return and the setters take
+ // a full-size (unsigned) format, to localize the casts here.
+
+ __declspec(property(get=GetRegNum,put=SetRegNum))
+ regNumber gtRegNum;
+
+ // for codegen purposes, is this node a subnode of its parent
+ bool isContained() const;
+
+ bool isContainedIndir() const;
+
+ bool isIndirAddrMode();
+
+ bool isIndir() const;
+
+ bool isContainedIntOrIImmed() const { return isContained() && IsCnsIntOrI(); }
+
+ bool isContainedFltOrDblImmed() const { return isContained() && (OperGet() == GT_CNS_DBL); }
+
+ bool isLclField() const { return OperGet() == GT_LCL_FLD || OperGet() == GT_STORE_LCL_FLD; }
+
+ bool isContainedLclField() const { return isContained() && isLclField(); }
+
+ // Indicates whether it is a memory op.
+ // Right now it includes Indir and LclField ops.
+ bool isMemoryOp() const { return isIndir() || isLclField(); }
+
+ bool isContainedMemoryOp() const { return isContained() && isMemoryOp(); }
+
+ regNumber GetRegNum() const
+ {
+ assert((gtRegTag == GT_REGTAG_REG) ||
+ (gtRegTag == GT_REGTAG_NONE)); // TODO-Cleanup: get rid of the NONE case, and fix everyplace that reads undefined values
+ regNumber reg = (regNumber) _gtRegNum;
+ assert((gtRegTag == GT_REGTAG_NONE) || // TODO-Cleanup: get rid of the NONE case, and fix everyplace that reads undefined values
+ (reg >= REG_FIRST &&
+ reg <= REG_COUNT));
+ return reg;
+ }
+
+ void SetRegNum(regNumber reg)
+ {
+ assert(reg >= REG_FIRST &&
+ reg <= REG_COUNT);
+ // Make sure the upper bits of _gtRegPair are clear
+ _gtRegPair = (regPairNoSmall) 0;
+ _gtRegNum = (regNumberSmall) reg;
+ INDEBUG(gtRegTag = GT_REGTAG_REG;)
+ assert(_gtRegNum == reg);
+ }
+
+ __declspec(property(get=GetRegPair,put=SetRegPair))
+ regPairNo gtRegPair;
+
+ regPairNo GetRegPair() const
+ {
+ assert((gtRegTag == GT_REGTAG_REGPAIR) ||
+ (gtRegTag == GT_REGTAG_NONE)); // TODO-Cleanup: get rid of the NONE case, and fix everyplace that reads undefined values
+ regPairNo regPair = (regPairNo) _gtRegPair;
+ assert((gtRegTag == GT_REGTAG_NONE) || // TODO-Cleanup: get rid of the NONE case, and fix everyplace that reads undefined values
+ (regPair >= REG_PAIR_FIRST &&
+ regPair <= REG_PAIR_LAST) ||
+ (regPair == REG_PAIR_NONE)); // allow initializing to an undefined value
+ return regPair;
+ }
+
+ void SetRegPair(regPairNo regPair)
+ {
+ assert((regPair >= REG_PAIR_FIRST &&
+ regPair <= REG_PAIR_LAST) ||
+ (regPair == REG_PAIR_NONE)); // allow initializing to an undefined value
+ _gtRegPair = (regPairNoSmall) regPair;
+ INDEBUG(gtRegTag = GT_REGTAG_REGPAIR;)
+ assert(_gtRegPair == regPair);
+ }
+
+ // Copy the _gtRegNum/_gtRegPair/gtRegTag fields
+ void CopyReg(GenTreePtr from)
+ {
+ // To do the copy, use _gtRegPair, which must be bigger than _gtRegNum. Note that the values
+ // might be undefined (so gtRegTag == GT_REGTAG_NONE).
+ _gtRegPair = from->_gtRegPair;
+ C_ASSERT(sizeof(_gtRegPair) >= sizeof(_gtRegNum));
+ INDEBUG(gtRegTag = from->gtRegTag;)
+ }
+
+ void gtClearReg(Compiler* compiler);
+
+ bool gtHasReg() const
+ {
+ // Has the node been assigned a register by LSRA?
+ //
+ // In order for this to work properly, gtClearReg (above) must be called prior to setting
+ // the register value.
+ if (isRegPairType(TypeGet()))
+ {
+ assert(_gtRegNum != REG_NA);
+ INDEBUG(assert(gtRegTag == GT_REGTAG_REGPAIR));
+ return gtRegPair != REG_PAIR_NONE;
+ }
+ else
+ {
+ assert(_gtRegNum != REG_PAIR_NONE);
+ INDEBUG(assert(gtRegTag == GT_REGTAG_REG));
+ return gtRegNum != REG_NA;
+ }
+ }
+
+ regMaskTP gtGetRegMask() const
+ {
+ if (isRegPairType(TypeGet()))
+ {
+ return genRegPairMask(gtRegPair);
+ }
+ else
+ {
+ return genRegMask(gtRegNum);
+ }
+ }
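+
+    // For illustration only: a hedged sketch of how the register accessors above are typically
+    // used on a node whose type is not a register-pair type:
+    //
+    //     tree->gtRegNum = reg;                        // SetRegNum via the property; also tags gtRegTag in DEBUG
+    //     if (tree->gtHasReg())                        // true once a real register (not REG_NA) is recorded
+    //     {
+    //         regMaskTP mask = tree->gtGetRegMask();   // single-register mask via genRegMask()
+    //     }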
+
+ unsigned gtFlags; // see GTF_xxxx below
+
+ ValueNumPair gtVNPair;
+
+ regMaskSmall gtRsvdRegs; // set of fixed trashed registers
+#ifdef LEGACY_BACKEND
+ regMaskSmall gtUsedRegs; // set of used (trashed) registers
+#endif // LEGACY_BACKEND
+
+#ifndef LEGACY_BACKEND
+ TreeNodeInfo gtLsraInfo;
+#endif // !LEGACY_BACKEND
+
+ void SetVNsFromNode(GenTreePtr tree)
+ {
+ gtVNPair = tree->gtVNPair;
+ }
+
+ ValueNum GetVN(ValueNumKind vnk) const
+ {
+ if (vnk == VNK_Liberal)
+ {
+ return gtVNPair.GetLiberal();
+ }
+ else
+ {
+ assert(vnk == VNK_Conservative);
+ return gtVNPair.GetConservative();
+ }
+ }
+ void SetVN(ValueNumKind vnk, ValueNum vn)
+ {
+ if (vnk == VNK_Liberal)
+ {
+ return gtVNPair.SetLiberal(vn);
+ }
+ else
+ {
+ assert(vnk == VNK_Conservative);
+ return gtVNPair.SetConservative(vn);
+ }
+ }
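+
+    // For illustration only: a hedged sketch of reading/writing the value number pair, assuming
+    // value numbers "libVN" and "consVN" obtained elsewhere (hypothetical names):
+    //
+    //     tree->SetVN(VNK_Liberal,      libVN);
+    //     tree->SetVN(VNK_Conservative, consVN);
+    //     assert(tree->GetVN(VNK_Liberal) == tree->gtVNPair.GetLiberal());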
+
+
+
+ //---------------------------------------------------------------------
+ // The first set of flags can be used with a large set of nodes, and
+ // thus they must all have distinct values. That is, one can test any
+ // expression node for one of these flags.
+ //---------------------------------------------------------------------
+
+ #define GTF_ASG 0x00000001 // sub-expression contains an assignment
+ #define GTF_CALL 0x00000002 // sub-expression contains a func. call
+ #define GTF_EXCEPT 0x00000004 // sub-expression might throw an exception
+ #define GTF_GLOB_REF 0x00000008 // sub-expression uses global variable(s)
+ #define GTF_ORDER_SIDEEFF 0x00000010 // sub-expression has a re-ordering side effect
+
+    // If you set these flags, make sure that code:gtExtractSideEffList knows how to find the tree,
+    // otherwise C# code such as (compiled with csc /o-)
+    //     var v = side_eff_operation
+    // with no use of v will drop your tree on the floor.
+ #define GTF_PERSISTENT_SIDE_EFFECTS (GTF_ASG|GTF_CALL)
+ #define GTF_SIDE_EFFECT (GTF_PERSISTENT_SIDE_EFFECTS|GTF_EXCEPT)
+ #define GTF_GLOB_EFFECT (GTF_SIDE_EFFECT|GTF_GLOB_REF)
+ #define GTF_ALL_EFFECT (GTF_GLOB_EFFECT|GTF_ORDER_SIDEEFF)
+
+ // The extra flag GTF_DEAD is used to tell the consumer of these flags
+ // that we are calling in the context of performing a CSE, thus we
+ // should allow the run-once side effects of running a class constructor.
+ //
+ #define GTF_PERSISTENT_SIDE_EFFECTS_IN_CSE (GTF_ASG|GTF_CALL|GTF_DEAD)
+
+ // Can any side-effects be observed externally, say by a caller method?
+ // For assignments, only assignments to global memory can be observed
+ // externally, whereas simple assignments to local variables can not.
+ //
+ // Be careful when using this inside a "try" protected region as the
+ // order of assignments to local variables would need to be preserved
+ // wrt side effects if the variables are alive on entry to the
+ // "catch/finally" region. In such cases, even assignments to locals
+ // will have to be restricted.
+ #define GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(flags) \
+ (((flags) & (GTF_CALL|GTF_EXCEPT)) || \
+ (((flags) & (GTF_ASG|GTF_GLOB_REF)) == (GTF_ASG|GTF_GLOB_REF)))
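+
+    // For illustration only: a hedged sketch of typical flag queries built from the composite
+    // masks above:
+    //
+    //     if (tree->gtFlags & GTF_SIDE_EFFECT)                    // assignment, call, or possible exception
+    //         ...
+    //     if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(tree->gtFlags))   // side effects observable outside this method
+    //         ...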
+
+ #define GTF_REVERSE_OPS 0x00000020 // operand op2 should be evaluated before op1 (normally, op1 is evaluated first and op2 is evaluated second)
+ #define GTF_REG_VAL 0x00000040 // operand is sitting in a register (or part of a TYP_LONG operand is sitting in a register)
+
+ #define GTF_SPILLED 0x00000080 // the value has been spilled
+ #define GTF_SPILLED_OPER 0x00000100 // op1 has been spilled
+
+#ifdef LEGACY_BACKEND
+ #define GTF_SPILLED_OP2 0x00000200 // op2 has been spilled
+#endif // LEGACY_BACKEND
+
+#ifdef DEBUG
+#ifndef LEGACY_BACKEND
+ #define GTF_MORPHED 0x00000200 // the node has been morphed (in the global morphing phase)
+#else // LEGACY_BACKEND
+ // For LEGACY_BACKEND, 0x00000200 is in use, but we can use the same value as GTF_SPILLED since we
+ // don't call gtSetEvalOrder(), which clears GTF_MORPHED, after GTF_SPILLED has been set.
+ #define GTF_MORPHED 0x00000080 // the node has been morphed (in the global morphing phase)
+#endif // LEGACY_BACKEND
+#endif // DEBUG
+
+ #define GTF_REDINDEX_CHECK 0x00000100 // Used for redundant range checks. Disjoint from GTF_SPILLED_OPER
+
+    #define GTF_ZSF_SET         0x00000400  // the zero (ZF) and sign (SF) flags are set for this operand
+#if FEATURE_SET_FLAGS
+ #define GTF_SET_FLAGS 0x00000800 // Requires that codegen for this node set the flags
+                                            // Use gtSetFlags() to check this flag
+#endif
+ #define GTF_IND_NONFAULTING 0x00000800 // An indir that cannot fault. GTF_SET_FLAGS is not used on indirs
+
+#if FEATURE_ANYCSE
+ #define GTF_DEAD 0x00001000 // this node won't be used any more
+#endif // FEATURE_ANYCSE
+
+ #define GTF_MAKE_CSE 0x00002000 // Hoisted Expression: try hard to make this into CSE (see optPerformHoistExpr)
+ #define GTF_DONT_CSE 0x00004000 // don't bother CSE'ing this expr
+ #define GTF_COLON_COND 0x00008000 // this node is conditionally executed (part of ? :)
+
+#if defined(DEBUG) && SMALL_TREE_NODES
+ #define GTF_NODE_LARGE 0x00010000
+ #define GTF_NODE_SMALL 0x00020000
+
+ // Property of the node itself, not the gtOper
+ #define GTF_NODE_MASK (GTF_COLON_COND | GTF_MORPHED | GTF_NODE_SMALL | GTF_NODE_LARGE )
+#else
+ #define GTF_NODE_MASK (GTF_COLON_COND)
+#endif
+
+ #define GTF_BOOLEAN 0x00040000 // value is known to be 0/1
+
+ #define GTF_SMALL_OK 0x00080000 // actual small int sufficient
+
+ #define GTF_UNSIGNED 0x00100000 // with GT_CAST: the source operand is an unsigned type
+ // with operators: the specified node is an unsigned operator
+
+ #define GTF_LATE_ARG 0x00200000 // the specified node is evaluated to a temp in the arg list, and this temp is added to gtCallLateArgs.
+
+ #define GTF_SPILL 0x00400000 // needs to be spilled here
+ #define GTF_SPILL_HIGH 0x00040000 // shared with GTF_BOOLEAN
+
+ #define GTF_COMMON_MASK 0x007FFFFF // mask of all the flags above
+
+ #define GTF_REUSE_REG_VAL 0x00800000 // This is set by the register allocator on nodes whose value already exists in the
+ // register assigned to this node, so the code generator does not have to generate
+ // code to produce the value.
+ // It is currently used only on constant nodes.
+ // It CANNOT be set on var (GT_LCL*) nodes, or on indir (GT_IND or GT_STOREIND) nodes, since
+ // 1) it is not needed for lclVars and is highly unlikely to be useful for indir nodes, and
+                                            // 2) it conflicts with GTFD_VAR_CSE_REF for lclVars (though this is debug only),
+                                            //    and with GTF_IND_ARR_INDEX for indirs.
+
+ //---------------------------------------------------------------------
+ // The following flags can be used only with a small set of nodes, and
+ // thus their values need not be distinct (other than within the set
+ // that goes with a particular node/nodes, of course). That is, one can
+ // only test for one of these flags if the 'gtOper' value is tested as
+ // well to make sure it's the right operator for the particular flag.
+ //---------------------------------------------------------------------
+
+ // NB: GTF_VAR_* and GTF_REG_* share the same namespace of flags, because
+ // GT_LCL_VAR nodes may be changed to GT_REG_VAR nodes without resetting
+ // the flags. These are also used by GT_LCL_FLD.
+ #define GTF_VAR_DEF 0x80000000 // GT_LCL_VAR -- this is a definition
+ #define GTF_VAR_USEASG 0x40000000 // GT_LCL_VAR -- this is a use/def for a x<op>=y
+ #define GTF_VAR_USEDEF 0x20000000 // GT_LCL_VAR -- this is a use/def as in x=x+y (only the lhs x is tagged)
+    #define GTF_VAR_CAST        0x10000000  // GT_LCL_VAR -- has been explicitly cast (variable node may not be the same type as the local)
+    #define GTF_VAR_ITERATOR    0x08000000  // GT_LCL_VAR -- this is an iterator reference in the loop condition
+ #define GTF_VAR_CLONED 0x01000000 // GT_LCL_VAR -- this node has been cloned or is a clone
+ // Relevant for inlining optimizations (see fgInlinePrependStatements)
+
+ // TODO-Cleanup: Currently, GTF_REG_BIRTH is used only by stackfp
+ // We should consider using it more generally for VAR_BIRTH, instead of
+ // GTF_VAR_DEF && !GTF_VAR_USEASG
+ #define GTF_REG_BIRTH 0x04000000 // GT_REG_VAR -- enregistered variable born here
+ #define GTF_VAR_DEATH 0x02000000 // GT_LCL_VAR, GT_REG_VAR -- variable dies here (last use)
+
+ #define GTF_VAR_ARR_INDEX 0x00000020 // The variable is part of (the index portion of) an array index expression.
+ // Shares a value with GTF_REVERSE_OPS, which is meaningless for local var.
+
+ #define GTF_LIVENESS_MASK (GTF_VAR_DEF|GTF_VAR_USEASG|GTF_VAR_USEDEF|GTF_REG_BIRTH|GTF_VAR_DEATH)
+
+ #define GTF_CALL_UNMANAGED 0x80000000 // GT_CALL -- direct call to unmanaged code
+ #define GTF_CALL_INLINE_CANDIDATE 0x40000000 // GT_CALL -- this call has been marked as an inline candidate
+//
+ #define GTF_CALL_VIRT_KIND_MASK 0x30000000
+ #define GTF_CALL_NONVIRT 0x00000000 // GT_CALL -- a non virtual call
+ #define GTF_CALL_VIRT_STUB 0x10000000 // GT_CALL -- a stub-dispatch virtual call
+ #define GTF_CALL_VIRT_VTABLE 0x20000000 // GT_CALL -- a vtable-based virtual call
+
+ #define GTF_CALL_NULLCHECK 0x08000000 // GT_CALL -- must check instance pointer for null
+ #define GTF_CALL_POP_ARGS 0x04000000 // GT_CALL -- caller pop arguments?
+ #define GTF_CALL_HOISTABLE 0x02000000 // GT_CALL -- call is hoistable
+ #define GTF_CALL_REG_SAVE 0x01000000 // GT_CALL -- This call preserves all integer regs
+ // For additional flags for GT_CALL node see GTF_CALL_M_
+
+#ifdef DEBUG
+ #define GTFD_VAR_CSE_REF 0x00800000 // GT_LCL_VAR -- This is a CSE LCL_VAR node
+#endif
+
+ #define GTF_NOP_DEATH 0x40000000 // GT_NOP -- operand dies here
+
+ #define GTF_FLD_NULLCHECK 0x80000000 // GT_FIELD -- need to nullcheck the "this" pointer
+ #define GTF_FLD_VOLATILE 0x40000000 // GT_FIELD/GT_CLS_VAR -- same as GTF_IND_VOLATILE
+
+ #define GTF_INX_RNGCHK 0x80000000 // GT_INDEX -- the array reference should be range-checked.
+ #define GTF_INX_REFARR_LAYOUT 0x20000000 // GT_INDEX -- same as GTF_IND_REFARR_LAYOUT
+ #define GTF_INX_STRING_LAYOUT 0x40000000 // GT_INDEX -- this uses the special string array layout
+
+    #define GTF_IND_VOLATILE    0x40000000  // GT_IND   -- the load or store must use volatile semantics (this is a nop on X86)
+    #define GTF_IND_REFARR_LAYOUT 0x20000000  // GT_IND -- the array holds object refs (only affects layout of Arrays)
+ #define GTF_IND_TGTANYWHERE 0x10000000 // GT_IND -- the target could be anywhere
+ #define GTF_IND_TLS_REF 0x08000000 // GT_IND -- the target is accessed via TLS
+ #define GTF_IND_ASG_LHS 0x04000000 // GT_IND -- this GT_IND node is (the effective val) of the LHS of an assignment; don't evaluate it independently.
+ #define GTF_IND_UNALIGNED 0x02000000 // GT_IND -- the load or store is unaligned (we assume worst case alignment of 1 byte)
+ #define GTF_IND_INVARIANT 0x01000000 // GT_IND -- the target is invariant (a prejit indirection)
+ #define GTF_IND_ARR_LEN 0x80000000 // GT_IND -- the indirection represents an array length (of the REF contribution to its argument).
+    #define GTF_IND_ARR_INDEX   0x00800000  // GT_IND   -- the indirection represents an (SZ) array index (this shares the same value as GTFD_VAR_CSE_REF,
+                                            //             but is disjoint because a GT_LCL_VAR is never an ind (GT_IND or GT_STOREIND))
+
+ #define GTF_IND_FLAGS (GTF_IND_VOLATILE|GTF_IND_REFARR_LAYOUT|GTF_IND_TGTANYWHERE|GTF_IND_NONFAULTING|\
+ GTF_IND_TLS_REF|GTF_IND_UNALIGNED|GTF_IND_INVARIANT|GTF_IND_ARR_INDEX)
+
+ #define GTF_CLS_VAR_ASG_LHS 0x04000000 // GT_CLS_VAR -- this GT_CLS_VAR node is (the effective val) of the LHS of an assignment; don't evaluate it independently.
+
+    #define GTF_ADDR_ONSTACK    0x80000000  // GT_ADDR    -- this expression is guaranteed to be on the stack
+
+
+ #define GTF_ADDRMODE_NO_CSE 0x80000000 // GT_ADD/GT_MUL/GT_LSH -- Do not CSE this node only, forms complex addressing mode
+
+ #define GTF_MUL_64RSLT 0x40000000 // GT_MUL -- produce 64-bit result
+
+ #define GTF_MOD_INT_RESULT 0x80000000 // GT_MOD, -- the real tree represented by this
+ // GT_UMOD node evaluates to an int even though
+ // its type is long. The result is
+ // placed in the low member of the
+ // reg pair
+
+ #define GTF_RELOP_NAN_UN 0x80000000 // GT_<relop> -- Is branch taken if ops are NaN?
+ #define GTF_RELOP_JMP_USED 0x40000000 // GT_<relop> -- result of compare used for jump or ?:
+ #define GTF_RELOP_QMARK 0x20000000 // GT_<relop> -- the node is the condition for ?:
+ #define GTF_RELOP_SMALL 0x10000000 // GT_<relop> -- We should use a byte or short sized compare (op1->gtType is the small type)
+
+ #define GTF_QMARK_CAST_INSTOF 0x80000000 // GT_QMARK -- Is this a top (not nested) level qmark created for castclass or instanceof?
+
+ #define GTF_BOX_VALUE 0x80000000 // GT_BOX -- "box" is on a value type
+
+ #define GTF_ICON_HDL_MASK 0xF0000000 // Bits used by handle types below
+
+ #define GTF_ICON_SCOPE_HDL 0x10000000 // GT_CNS_INT -- constant is a scope handle
+ #define GTF_ICON_CLASS_HDL 0x20000000 // GT_CNS_INT -- constant is a class handle
+ #define GTF_ICON_METHOD_HDL 0x30000000 // GT_CNS_INT -- constant is a method handle
+ #define GTF_ICON_FIELD_HDL 0x40000000 // GT_CNS_INT -- constant is a field handle
+ #define GTF_ICON_STATIC_HDL 0x50000000 // GT_CNS_INT -- constant is a handle to static data
+ #define GTF_ICON_STR_HDL 0x60000000 // GT_CNS_INT -- constant is a string handle
+ #define GTF_ICON_PSTR_HDL 0x70000000 // GT_CNS_INT -- constant is a ptr to a string handle
+ #define GTF_ICON_PTR_HDL 0x80000000 // GT_CNS_INT -- constant is a ldptr handle
+ #define GTF_ICON_VARG_HDL 0x90000000 // GT_CNS_INT -- constant is a var arg cookie handle
+ #define GTF_ICON_PINVKI_HDL 0xA0000000 // GT_CNS_INT -- constant is a pinvoke calli handle
+ #define GTF_ICON_TOKEN_HDL 0xB0000000 // GT_CNS_INT -- constant is a token handle
+ #define GTF_ICON_TLS_HDL 0xC0000000 // GT_CNS_INT -- constant is a TLS ref with offset
+ #define GTF_ICON_FTN_ADDR 0xD0000000 // GT_CNS_INT -- constant is a function address
+ #define GTF_ICON_CIDMID_HDL 0xE0000000 // GT_CNS_INT -- constant is a class or module ID handle
+ #define GTF_ICON_BBC_PTR 0xF0000000 // GT_CNS_INT -- constant is a basic block count pointer
+
+ #define GTF_ICON_FIELD_OFF 0x08000000 // GT_CNS_INT -- constant is a field offset
+
+ #define GTF_BLK_HASGCPTR 0x80000000 // GT_COPYBLK -- This struct copy will copy GC Pointers
+ #define GTF_BLK_VOLATILE 0x40000000 // GT_INITBLK/GT_COPYBLK -- is a volatile block operation
+ #define GTF_BLK_UNALIGNED 0x02000000 // GT_INITBLK/GT_COPYBLK -- is an unaligned block operation
+
+ #define GTF_OVERFLOW 0x10000000 // GT_ADD, GT_SUB, GT_MUL, - Need overflow check
+ // GT_ASG_ADD, GT_ASG_SUB,
+ // GT_CAST
+ // Use gtOverflow(Ex)() to check this flag
+
+    #define GTF_NO_OP_NO        0x80000000  // GT_NO_OP   -- have the code generator generate a special nop
+
+ //----------------------------------------------------------------
+
+ #define GTF_STMT_CMPADD 0x80000000 // GT_STMT -- added by compiler
+    #define GTF_STMT_HAS_CSE    0x40000000  // GT_STMT    -- CSE def or use was substituted
+ #define GTF_STMT_TOP_LEVEL 0x20000000 // GT_STMT -- Top-level statement - true iff gtStmtList->gtPrev == nullptr
+ // True for all stmts when in FGOrderTree
+ #define GTF_STMT_SKIP_LOWER 0x10000000 // GT_STMT -- Skip lowering if we already lowered an embedded stmt.
+
+ //----------------------------------------------------------------
+
+ GenTreePtr gtNext;
+ GenTreePtr gtPrev;
+
+#ifdef DEBUG
+ unsigned gtTreeID;
+ unsigned gtSeqNum; // liveness traversal order within the current statement
+#endif
+
+ static
+ const unsigned short gtOperKindTable[];
+
+ static
+ unsigned OperKind(unsigned gtOper)
+ {
+ assert(gtOper < GT_COUNT);
+
+ return gtOperKindTable[gtOper];
+ }
+
+ unsigned OperKind() const
+ {
+ assert(gtOper < GT_COUNT);
+
+ return gtOperKindTable[gtOper];
+ }
+
+ static bool IsExOp(unsigned opKind)
+ {
+ return (opKind & GTK_EXOP) != 0;
+ }
+    // Returns the operKind with the GTK_EXOP bit removed (the
+    // kind of operator, unary or binary, that is extended).
+ static unsigned StripExOp(unsigned opKind)
+ {
+ return opKind & ~GTK_EXOP;
+ }
+
+ static
+ int OperIsConst(genTreeOps gtOper)
+ {
+ return (OperKind(gtOper) & GTK_CONST ) != 0;
+ }
+
+ int OperIsConst() const
+ {
+ return (OperKind(gtOper) & GTK_CONST ) != 0;
+ }
+
+ static
+ int OperIsLeaf(genTreeOps gtOper)
+ {
+ return (OperKind(gtOper) & GTK_LEAF ) != 0;
+ }
+
+ int OperIsLeaf() const
+ {
+ return (OperKind(gtOper) & GTK_LEAF ) != 0;
+ }
+
+ static
+ int OperIsCompare(genTreeOps gtOper)
+ {
+ return (OperKind(gtOper) & GTK_RELOP ) != 0;
+ }
+
+ static
+ bool OperIsLocal(genTreeOps gtOper)
+ {
+ bool result = (OperKind(gtOper) & GTK_LOCAL) != 0;
+ assert(result ==
+ (gtOper == GT_LCL_VAR ||
+ gtOper == GT_PHI_ARG ||
+ gtOper == GT_REG_VAR ||
+ gtOper == GT_LCL_FLD ||
+ gtOper == GT_STORE_LCL_VAR ||
+ gtOper == GT_STORE_LCL_FLD));
+ return result;
+ }
+
+ static
+ bool OperIsBlkOp(genTreeOps gtOper)
+ {
+ return (gtOper == GT_INITBLK ||
+ gtOper == GT_COPYBLK ||
+ gtOper == GT_COPYOBJ);
+ }
+
+ static
+ bool OperIsCopyBlkOp(genTreeOps gtOper)
+ {
+ return (gtOper == GT_COPYOBJ || gtOper == GT_COPYBLK);
+ }
+
+
+ static
+ bool OperIsLocalAddr(genTreeOps gtOper)
+ {
+ return (gtOper == GT_LCL_VAR_ADDR ||
+ gtOper == GT_LCL_FLD_ADDR);
+ }
+
+ static
+ bool OperIsScalarLocal(genTreeOps gtOper)
+ {
+ return (gtOper == GT_LCL_VAR ||
+ gtOper == GT_REG_VAR ||
+ gtOper == GT_STORE_LCL_VAR);
+ }
+
+ static
+ bool OperIsNonPhiLocal(genTreeOps gtOper)
+ {
+ return OperIsLocal(gtOper) && (gtOper != GT_PHI_ARG);
+ }
+
+ static
+ bool OperIsLocalRead(genTreeOps gtOper)
+ {
+ return (OperIsLocal(gtOper) && !OperIsLocalStore(gtOper));
+ }
+
+ static
+ bool OperIsLocalStore(genTreeOps gtOper)
+ {
+ return (gtOper == GT_STORE_LCL_VAR ||
+ gtOper == GT_STORE_LCL_FLD);
+
+ }
+
+ static
+ bool OperIsAddrMode(genTreeOps gtOper)
+ {
+ return (gtOper == GT_LEA);
+ }
+
+ bool OperIsBlkOp() const
+ {
+ return OperIsBlkOp(OperGet());
+ }
+
+ bool OperIsCopyBlkOp() const
+ {
+ return OperIsCopyBlkOp(OperGet());
+ }
+
+ bool OperIsAddrMode() const
+ {
+ return OperIsAddrMode(OperGet());
+ }
+
+ bool OperIsLocal() const
+ {
+ return OperIsLocal(OperGet());
+ }
+
+ bool OperIsLocalAddr() const
+ {
+ return OperIsLocalAddr(OperGet());
+ }
+
+ bool OperIsScalarLocal() const
+ {
+ return OperIsScalarLocal(OperGet());
+ }
+
+ bool OperIsNonPhiLocal() const
+ {
+ return OperIsNonPhiLocal(OperGet());
+ }
+
+ bool OperIsLocalStore() const
+ {
+ return OperIsLocalStore(OperGet());
+ }
+
+ bool OperIsLocalRead() const
+ {
+ return OperIsLocalRead(OperGet());
+ }
+
+ int OperIsCompare()
+ {
+ return (OperKind(gtOper) & GTK_RELOP ) != 0;
+ }
+
+ static
+ int OperIsLogical(genTreeOps gtOper)
+ {
+ return (OperKind(gtOper) & GTK_LOGOP ) != 0;
+ }
+
+ int OperIsLogical() const
+ {
+ return (OperKind(gtOper) & GTK_LOGOP ) != 0;
+ }
+
+ int OperIsArithmetic() const
+ {
+ genTreeOps op = OperGet();
+ return op==GT_ADD
+ || op==GT_SUB
+ || op==GT_MUL
+ || op==GT_DIV
+ || op==GT_MOD
+
+ || op==GT_UDIV
+ || op==GT_UMOD
+
+ || op==GT_OR
+ || op==GT_XOR
+ || op==GT_AND
+
+ || op==GT_LSH
+ || op==GT_RSH
+ || op==GT_RSZ;
+
+ }
+
+ static
+ int OperIsUnary(genTreeOps gtOper)
+ {
+ return (OperKind(gtOper) & GTK_UNOP ) != 0;
+ }
+
+ int OperIsUnary() const
+ {
+ return OperIsUnary(gtOper);
+ }
+
+ static
+ int OperIsBinary(genTreeOps gtOper)
+ {
+ return (OperKind(gtOper) & GTK_BINOP ) != 0;
+ }
+
+ int OperIsBinary() const
+ {
+ return OperIsBinary(gtOper);
+ }
+
+ static
+ int OperIsSimple(genTreeOps gtOper)
+ {
+ return (OperKind(gtOper) & GTK_SMPOP ) != 0;
+ }
+
+ static
+ int OperIsSpecial(genTreeOps gtOper)
+ {
+ return ((OperKind(gtOper) & GTK_KINDMASK) == GTK_SPECIAL);
+ }
+
+ int OperIsSimple() const
+ {
+ return OperIsSimple(gtOper);
+ }
+
+#ifdef FEATURE_SIMD
+ bool isCommutativeSIMDIntrinsic();
+#else // !FEATURE_SIMD
+ bool isCommutativeSIMDIntrinsic()
+ {
+ return false;
+ }
+#endif // FEATURE_SIMD
+
+ static
+ int OperIsCommutative(genTreeOps gtOper)
+ {
+ return (OperKind(gtOper) & GTK_COMMUTE) != 0;
+ }
+
+ int OperIsCommutative()
+ {
+ return OperIsCommutative(gtOper) || (OperIsSIMD(gtOper) && isCommutativeSIMDIntrinsic());
+ }
+
+ static
+ int OperIsAssignment(genTreeOps gtOper)
+ {
+ return (OperKind(gtOper) & GTK_ASGOP) != 0;
+ }
+
+ int OperIsAssignment() const
+ {
+ return OperIsAssignment(gtOper);
+ }
+
+ static
+ int OperIsIndir(genTreeOps gtOper)
+ {
+ return gtOper == GT_IND || gtOper == GT_STOREIND || gtOper == GT_NULLCHECK;
+ }
+
+ int OperIsIndir() const
+ {
+ return OperIsIndir(gtOper);
+ }
+
+ static
+ bool OperIsImplicitIndir(genTreeOps gtOper)
+ {
+ switch (gtOper)
+ {
+ case GT_LOCKADD:
+ case GT_XADD:
+ case GT_CMPXCHG:
+ case GT_COPYBLK:
+ case GT_COPYOBJ:
+ case GT_INITBLK:
+ case GT_LDOBJ:
+ case GT_BOX:
+ case GT_ARR_INDEX:
+ case GT_ARR_ELEM:
+ case GT_ARR_OFFSET:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool OperIsImplicitIndir() const
+ {
+ return OperIsImplicitIndir(gtOper);
+ }
+
+ bool OperIsStore() const
+ {
+ return OperIsStore(gtOper);
+ }
+
+ static
+ bool OperIsStore(genTreeOps gtOper)
+ {
+ return (gtOper == GT_STOREIND
+ || gtOper == GT_STORE_LCL_VAR
+ || gtOper == GT_STORE_LCL_FLD
+ || gtOper == GT_STORE_CLS_VAR);
+ }
+
+ static
+ int OperIsAtomicOp(genTreeOps gtOper)
+ {
+ return (gtOper == GT_XADD
+ || gtOper == GT_XCHG
+ || gtOper == GT_LOCKADD
+ || gtOper == GT_CMPXCHG);
+ }
+
+ int OperIsAtomicOp()
+ {
+ return OperIsAtomicOp(gtOper);
+ }
+
+ // This is basically here for cleaner FEATURE_SIMD #ifdefs.
+ static
+ bool OperIsSIMD(genTreeOps gtOper)
+ {
+#ifdef FEATURE_SIMD
+ return gtOper == GT_SIMD;
+#else // !FEATURE_SIMD
+ return false;
+#endif // !FEATURE_SIMD
+ }
+
+ // Requires that "op" is an op= operator. Returns
+ // the corresponding "op".
+ static
+ genTreeOps OpAsgToOper(genTreeOps op);
+
+#ifdef DEBUG
+ bool NullOp1Legal() const
+ {
+ assert(OperIsSimple(gtOper));
+ switch (gtOper)
+ {
+ case GT_PHI: case GT_LEA: case GT_RETFILT: case GT_NOP:
+ return true;
+ case GT_RETURN:
+ return gtType == TYP_VOID;
+ default:
+ return false;
+ }
+ }
+
+ bool NullOp2Legal() const
+ {
+ assert(OperIsSimple(gtOper));
+ if (!OperIsBinary(gtOper))
+ {
+ return true;
+ }
+ switch (gtOper)
+ {
+ case GT_LIST:
+ case GT_MATH:
+ case GT_LEA:
+ case GT_STOREIND:
+ case GT_INITBLK:
+ case GT_COPYBLK:
+ case GT_COPYOBJ:
+#ifdef FEATURE_SIMD
+ case GT_SIMD:
+#endif // FEATURE_SIMD
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ static
+ inline bool RequiresNonNullOp2(genTreeOps oper);
+
+#endif // DEBUG
+
+ inline bool IsZero();
+
+ inline bool IsBoxedValue();
+
+ bool IsList() const
+ {
+ return gtOper == GT_LIST;
+ }
+
+ inline GenTreePtr MoveNext();
+
+ inline GenTreePtr Current();
+
+ inline GenTreePtr *pCurrent();
+
+ inline GenTreePtr gtGetOp1();
+
+ inline GenTreePtr gtGetOp2();
+
+ // Given a tree node, if this is a child of that node, return the pointer to the child node so that it
+ // can be modified; otherwise, return null.
+ GenTreePtr* gtGetChildPointer(GenTreePtr parent);
+
+ // Get the parent of this node, and optionally capture the pointer to the child so that it can be modified.
+ GenTreePtr gtGetParent(GenTreePtr** parentChildPtrPtr);
+
+ inline GenTreePtr gtEffectiveVal(bool commaOnly = false);
+
+ // Return the child of this node if it is a GT_RELOAD or GT_COPY; otherwise simply return the node itself
+ inline GenTree* gtSkipReloadOrCopy();
+
+ bool OperMayThrow();
+
+ unsigned GetScaleIndexMul();
+ unsigned GetScaleIndexShf();
+ unsigned GetScaledIndex();
+
+ // Returns true if "addr" is a GT_ADD node, at least one of whose arguments is an integer
+ // (<= 32 bit) constant. If it returns true, it sets "*offset" to (one of the) constant value(s), and
+ // "*addr" to the other argument.
+ bool IsAddWithI32Const(GenTreePtr* addr, int* offset);
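+
+    // For illustration only: a hedged sketch of IsAddWithI32Const, assuming "tree" is a
+    // GT_ADD whose second operand is a small integer constant:
+    //
+    //     GenTreePtr base;
+    //     int        offset;
+    //     if (tree->IsAddWithI32Const(&base, &offset))
+    //     {
+    //         // "base" is the non-constant operand and "offset" the constant value.
+    //     }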
+
+ // Insert 'node' after this node in execution order.
+ void InsertAfterSelf(GenTree* node, GenTreeStmt* stmt = nullptr);
+
+public:
+
+#if SMALL_TREE_NODES
+ static
+ unsigned char s_gtNodeSizes[];
+#endif
+
+ static
+ void InitNodeSize();
+
+ size_t GetNodeSize() const;
+
+ bool IsNodeProperlySized() const;
+
+ void CopyFrom(const GenTree* src, Compiler* comp);
+
+ static
+ genTreeOps ReverseRelop(genTreeOps relop);
+
+ static
+ genTreeOps SwapRelop(genTreeOps relop);
+
+ //---------------------------------------------------------------------
+
+ static
+ bool Compare(GenTreePtr op1, GenTreePtr op2, bool swapOK = false);
+
+ //---------------------------------------------------------------------
+ #ifdef DEBUG
+ //---------------------------------------------------------------------
+
+ static
+ const char * NodeName(genTreeOps op);
+
+ //---------------------------------------------------------------------
+ #endif
+ //---------------------------------------------------------------------
+
+ bool IsNothingNode () const;
+ void gtBashToNOP ();
+
+ void SetOper (genTreeOps oper); // set gtOper
+ void SetOperResetFlags (genTreeOps oper); // set gtOper and reset flags
+
+ void ChangeOperConst (genTreeOps oper); // ChangeOper(constOper)
+ void ChangeOper (genTreeOps oper); // set gtOper and only keep GTF_COMMON_MASK flags
+ void ChangeOperUnchecked (genTreeOps oper);
+
+ bool IsLocal() const
+ {
+ return OperIsLocal(OperGet());
+ }
+
+ // Returns "true" iff "this" defines a local variable. Requires "comp" to be the
+ // current compilation. If returns "true", sets "*pLclVarTree" to the
+ // tree for the local that is defined, and, if "pIsEntire" is non-null, sets "*pIsEntire" to
+ // true or false, depending on whether the assignment writes to the entirety of the local
+ // variable, or just a portion of it.
+ bool DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire = nullptr);
+
+ // Returns true if "this" represents the address of a local, or a field of a local. If returns true, sets
+ // "*pLclVarTree" to the node indicating the local variable. If the address is that of a field of this node,
+ // sets "*pFldSeq" to the field sequence representing that field, else null.
+ bool IsLocalAddrExpr(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, FieldSeqNode** pFldSeq);
+
+ // Simpler variant of the above which just returns the local node if this is an expression that
+ // yields an address into a local
+ GenTreeLclVarCommon* IsLocalAddrExpr();
+
+ // If returns "true", "this" may represent the address of a static or instance field
+ // (or a field of such a field, in the case of an object field of type struct).
+ // If returns "true", then either "*pObj" is set to the object reference,
+ // or "*pStatic" is set to the baseAddr or offset to be added to the "*pFldSeq"
+ // Only one of "*pObj" or "*pStatic" will be set, the other one will be null.
+ // The boolean return value only indicates that "this" *may* be a field address
+ // -- the field sequence must also be checked.
+ // If it is a field address, the field sequence will be a sequence of length >= 1,
+ // starting with an instance or static field, and optionally continuing with struct fields.
+ bool IsFieldAddr(Compiler* comp, GenTreePtr* pObj, GenTreePtr* pStatic, FieldSeqNode** pFldSeq);
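+
+    // For illustration only: a hedged sketch of calling IsFieldAddr, assuming "addr" is a
+    // GenTreePtr that may be a field address and "comp" is the current Compiler*:
+    //
+    //     GenTreePtr    obj    = nullptr;
+    //     GenTreePtr    stat   = nullptr;
+    //     FieldSeqNode* fldSeq = nullptr;
+    //     if (addr->IsFieldAddr(comp, &obj, &stat, &fldSeq))
+    //     {
+    //         // Exactly one of "obj"/"stat" is non-null; "fldSeq" must still be checked.
+    //     }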
+
+ // Requires "this" to be the address of an array (the child of a GT_IND labeled with GTF_IND_ARR_INDEX).
+    // Sets "*pArr" to the node representing the array (either an array object pointer, or perhaps a byref to some element).
+ // Sets "*pArrayType" to the class handle for the array type.
+ // Sets "*inxVN" to the value number inferred for the array index.
+ // Sets "*pFldSeq" to the sequence, if any, of struct fields used to index into the array element.
+ void ParseArrayAddress(Compiler* comp,
+ struct ArrayInfo* arrayInfo,
+ GenTreePtr* pArr,
+ ValueNum* pInxVN,
+ FieldSeqNode** pFldSeq);
+
+ // Helper method for the above.
+ void ParseArrayAddressWork(Compiler* comp, ssize_t inputMul, GenTreePtr* pArr, ValueNum* pInxVN, ssize_t* pOffset, FieldSeqNode** pFldSeq);
+
+ // Requires "this" to be a GT_IND. Requires the outermost caller to set "*pFldSeq" to nullptr.
+ // Returns true if it is an array index expression, or access to a (sequence of) struct field(s)
+ // within a struct array element. If it returns true, sets *arrayInfo to the array information, and sets *pFldSeq to the sequence
+ // of struct field accesses.
+ bool ParseArrayElemForm(Compiler* comp, ArrayInfo* arrayInfo, FieldSeqNode** pFldSeq);
+
+ // Requires "this" to be the address of a (possible) array element (or struct field within that).
+ // If it is, sets "*arrayInfo" to the array access info, "*pFldSeq" to the sequence of struct fields
+ // accessed within the array element, and returns true. If not, returns "false".
+ bool ParseArrayElemAddrForm(Compiler* comp, ArrayInfo* arrayInfo, FieldSeqNode** pFldSeq);
+
+ // Requires "this" to be an int expression. If it is a sequence of one or more integer constants added together,
+ // returns true and sets "*pFldSeq" to the sequence of fields with which those constants are annotated.
+ bool ParseOffsetForm(Compiler* comp, FieldSeqNode** pFldSeq);
+
+    // Labels "*this" as an array index expression: label all constants and variables that could contribute, as part of
+    // an affine expression, to the value of the index.
+ void LabelIndex(Compiler* comp, bool isConst = true);
+
+ // Assumes that "this" occurs in a context where it is being dereferenced as the LHS of an assignment-like
+ // statement (assignment, initblk, or copyblk). The "width" should be the number of bytes copied by the
+ // operation. Returns "true" if "this" is an address of (or within)
+ // a local variable; sets "*pLclVarTree" to that local variable instance; and, if "pIsEntire" is non-null,
+ // sets "*pIsEntire" to true if this assignment writes the full width of the local.
+ bool DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire);
+
+ bool IsRegVar () const
+ {
+ return OperGet() == GT_REG_VAR?true:false;
+ }
+ bool InReg() const
+ {
+ return (gtFlags & GTF_REG_VAL)?true:false;
+ }
+ void SetInReg()
+ {
+ gtFlags |= GTF_REG_VAL;
+ }
+
+ regNumber GetReg() const
+ {
+ return InReg() ? gtRegNum : REG_NA;
+ }
+ bool IsRegVarDeath () const
+ {
+ assert(OperGet() == GT_REG_VAR);
+ return (gtFlags & GTF_VAR_DEATH)?true:false;
+ }
+ bool IsRegVarBirth () const
+ {
+ assert(OperGet() == GT_REG_VAR);
+ return (gtFlags & GTF_REG_BIRTH)?true:false;
+ }
+ bool IsReverseOp() const
+ {
+ return (gtFlags & GTF_REVERSE_OPS)?true:false;
+ }
+
+ inline bool IsCnsIntOrI () const;
+
+ inline bool IsIntCnsFitsInI32 ();
+
+ inline bool IsCnsFltOrDbl() const;
+
+ inline bool IsCnsNonZeroFltOrDbl();
+
+ bool IsIconHandle () const
+ {
+ assert(gtOper == GT_CNS_INT);
+ return (gtFlags & GTF_ICON_HDL_MASK) ? true : false;
+ }
+
+ bool IsIconHandle (unsigned handleType) const
+ {
+ assert(gtOper == GT_CNS_INT);
+ assert((handleType & GTF_ICON_HDL_MASK) != 0); // check that handleType is one of the valid GTF_ICON_* values
+ assert((handleType & ~GTF_ICON_HDL_MASK) == 0);
+ return (gtFlags & GTF_ICON_HDL_MASK) == handleType;
+ }
+
+ // Return just the part of the flags corresponding to the GTF_ICON_*_HDL flag. For example,
+ // GTF_ICON_SCOPE_HDL. The tree node must be a const int, but it might not be a handle, in which
+ // case we'll return zero.
+ unsigned GetIconHandleFlag () const
+ {
+ assert(gtOper == GT_CNS_INT);
+ return (gtFlags & GTF_ICON_HDL_MASK);
+ }
+
+ // Mark this node as no longer being a handle; clear its GTF_ICON_*_HDL bits.
+ void ClearIconHandleMask()
+ {
+ assert(gtOper == GT_CNS_INT);
+ gtFlags &= ~GTF_ICON_HDL_MASK;
+ }
+
+ // Return true if the two GT_CNS_INT trees have the same handle flag (GTF_ICON_*_HDL).
+ static bool SameIconHandleFlag(GenTree* t1, GenTree* t2)
+ {
+ return t1->GetIconHandleFlag() == t2->GetIconHandleFlag();
+ }
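+
+    // For illustration only: a hedged sketch of the handle-flag queries above, assuming "icon"
+    // is a GT_CNS_INT node:
+    //
+    //     if (icon->IsIconHandle(GTF_ICON_CLASS_HDL))      // is it specifically a class handle?
+    //         ...
+    //     unsigned hdlKind = icon->GetIconHandleFlag();    // zero if not a handle at all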
+
+ bool IsArgPlaceHolderNode() const { return OperGet() == GT_ARGPLACE; }
+ bool IsCall () const { return OperGet() == GT_CALL; }
+ bool IsStatement () const { return OperGet() == GT_STMT; }
+ inline bool IsHelperCall ();
+
+ bool IsVarAddr () const;
+ bool gtOverflow () const;
+ bool gtOverflowEx () const;
+ bool gtSetFlags () const;
+ bool gtRequestSetFlags ();
+#ifdef DEBUG
+ bool gtIsValid64RsltMul ();
+ static int gtDispFlags (unsigned flags);
+#endif
+
+ // cast operations
+ inline var_types CastFromType();
+ inline var_types& CastToType();
+
+ // Returns "true" iff "*this" is an assignment (GT_ASG) tree that defines an SSA name (lcl = phi(...));
+ bool IsPhiDefn();
+
+ // Returns "true" iff "*this" is a statement containing an assignment that defines an SSA name (lcl = phi(...));
+ bool IsPhiDefnStmt();
+
+ // Can't use an assignment operator, because we need the extra "comp" argument
+ // (to provide the allocator necessary for the VarSet assignment).
+ // TODO-Cleanup: Not really needed now, w/o liveset on tree nodes
+ void CopyTo(class Compiler* comp, const GenTree& gt);
+
+    // Like the above, except it assumes copying from small node to small node.
+ // (Following the code it replaces, it does *not* copy the GenTree fields,
+ // which CopyTo does.)
+ void CopyToSmall(const GenTree& gt);
+
+ // Because of the fact that we hid the assignment operator of "BitSet" (in DEBUG),
+ // we can't synthesize an assignment operator.
+ // TODO-Cleanup: Could change this w/o liveset on tree nodes
+ // (This is also necessary for the VTable trick.)
+ GenTree() {}
+
+ // Returns the number of children of the current node.
+ unsigned NumChildren();
+
+ // Requires "childNum < NumChildren()". Returns the "n"th child of "this."
+ GenTreePtr GetChild(unsigned childNum);
+
+ // The maximum possible # of children of any node.
+ static const int MAX_CHILDREN = 6;
+
+ bool IsReuseRegVal() const
+ {
+ // This can be extended to non-constant nodes, but not to local or indir nodes.
+ if(OperIsConst() && ((gtFlags & GTF_REUSE_REG_VAL) != 0))
+ {
+ return true;
+ }
+ return false;
+ }
+ void SetReuseRegVal()
+ {
+ assert(OperIsConst());
+ gtFlags |= GTF_REUSE_REG_VAL;
+ }
+ void ResetReuseRegVal()
+ {
+ assert(OperIsConst());
+ gtFlags &= ~GTF_REUSE_REG_VAL;
+ }
+
+#ifdef DEBUG
+ private:
+    // Private to prevent accidental copy-assignment of tree nodes; returns *this to satisfy the signature.
+    GenTree& operator=(const GenTree& gt) {
+        return *this;
+    }
+#endif // DEBUG
+
+#if DEBUGGABLE_GENTREE
+ // In DEBUG builds, add a dummy virtual method, to give the debugger run-time type information.
+ virtual void DummyVirt() {}
+
+ typedef void* VtablePtr;
+
+ VtablePtr GetVtableForOper(genTreeOps oper);
+ void SetVtableForOper(genTreeOps oper);
+
+ static VtablePtr s_vtablesForOpers[GT_COUNT];
+ static VtablePtr s_vtableForOp;
+#endif // DEBUGGABLE_GENTREE
+
+ public:
+ inline void* operator new(size_t sz, class Compiler*, genTreeOps oper);
+
+ inline GenTree(genTreeOps oper, var_types type
+ DEBUG_ARG(bool largeNode = false));
+};
+
+
+/*****************************************************************************/
+// In the current design, we never instantiate GenTreeUnOp: it exists only to be
+// used as a base class. For unary operators, we instantiate GenTreeOp, with a NULL second
+// argument. We check that this is true dynamically. We could tighten this and get static
+// checking, but that would entail accessing the first child of a unary operator via something
+// like gtUnOp.gtOp1 instead of gtOp.gtOp1.
+struct GenTreeUnOp: public GenTree
+{
+ GenTreePtr gtOp1;
+
+protected:
+ GenTreeUnOp(genTreeOps oper, var_types type
+ DEBUG_ARG(bool largeNode = false)) :
+ GenTree(oper, type
+ DEBUG_ARG(largeNode)),
+ gtOp1(nullptr)
+ {}
+
+ GenTreeUnOp(genTreeOps oper, var_types type, GenTreePtr op1
+ DEBUG_ARG(bool largeNode = false)) :
+ GenTree(oper, type
+ DEBUG_ARG(largeNode)),
+ gtOp1(op1)
+ {
+ assert(op1 != nullptr || NullOp1Legal());
+ if (op1 != nullptr) // Propagate effects flags from child.
+ gtFlags |= op1->gtFlags & GTF_ALL_EFFECT;
+ }
+
+#if DEBUGGABLE_GENTREE
+ GenTreeUnOp() : GenTree(), gtOp1(nullptr) {}
+#endif
+};
+
+struct GenTreeOp: public GenTreeUnOp
+{
+ GenTreePtr gtOp2;
+
+ GenTreeOp(genTreeOps oper, var_types type, GenTreePtr op1, GenTreePtr op2
+ DEBUG_ARG(bool largeNode = false)) :
+ GenTreeUnOp(oper, type, op1
+ DEBUG_ARG(largeNode)),
+ gtOp2(op2)
+ {
+ // comparisons are always integral types
+ assert(!GenTree::OperIsCompare(oper) || varTypeIsIntegral(type));
+ // Binary operators, with a few exceptions, require a non-nullptr
+ // second argument.
+ assert(op2 != nullptr || NullOp2Legal());
+ // Unary operators, on the other hand, require a null second argument.
+ assert(!OperIsUnary(oper) || op2 == nullptr);
+ // Propagate effects flags from child. (UnOp handled this for first child.)
+ if (op2 != nullptr)
+ {
+ gtFlags |= op2->gtFlags & GTF_ALL_EFFECT;
+ }
+ }
+
+    // A small set of operators are unary but take an optional argument. We use
+    // this constructor to build those.
+ GenTreeOp(genTreeOps oper, var_types type
+ DEBUG_ARG(bool largeNode = false)) :
+ GenTreeUnOp(oper, type
+ DEBUG_ARG(largeNode)),
+ gtOp2(nullptr)
+ {
+ // Unary operators with optional arguments:
+ assert(oper == GT_NOP ||
+ oper == GT_RETURN ||
+ oper == GT_RETFILT ||
+ OperIsBlkOp(oper));
+ }
+
+#if DEBUGGABLE_GENTREE
+ GenTreeOp() : GenTreeUnOp(), gtOp2(nullptr) {}
+#endif
+};
+
+
+struct GenTreeVal: public GenTree
+{
+ size_t gtVal1;
+
+ GenTreeVal(genTreeOps oper, var_types type, ssize_t val) :
+ GenTree(oper, type),
+ gtVal1(val)
+ {}
+#if DEBUGGABLE_GENTREE
+ GenTreeVal() : GenTree() {}
+#endif
+};
+
+struct GenTreeIntConCommon: public GenTree
+{
+ inline INT64 LngValue();
+ inline void SetLngValue(INT64 val);
+ inline ssize_t IconValue();
+ inline void SetIconValue(ssize_t val);
+
+ GenTreeIntConCommon(genTreeOps oper, var_types type
+ DEBUG_ARG(bool largeNode = false)) :
+ GenTree(oper, type
+ DEBUG_ARG(largeNode))
+ {}
+
+#ifdef _TARGET_XARCH_
+ bool FitsInAddrBase(Compiler *comp);
+#endif
+
+#if DEBUGGABLE_GENTREE
+ GenTreeIntConCommon() : GenTree() {}
+#endif
+};
+
+// node representing a read from a physical register
+struct GenTreePhysReg: public GenTree
+{
+    // Physregs need a field beyond gtRegNum because
+    // gtRegNum indicates the destination (and can be changed),
+    // whereas gtSrcReg indicates the source.
+ regNumber gtSrcReg;
+ GenTreePhysReg(regNumber r, var_types type=TYP_I_IMPL) :
+ GenTree(GT_PHYSREG, type), gtSrcReg(r)
+ {
+ }
+#if DEBUGGABLE_GENTREE
+ GenTreePhysReg() : GenTree() {}
+#endif
+};
+
+#ifndef LEGACY_BACKEND
+// gtJumpTable - Switch Jump Table
+//
+// This node stores a DWORD constant that represents the
+// absolute address of a jump table for switches. The code
+// generator emits the destination of every case as an array
+// of addresses whose starting position is given by this constant.
+struct GenTreeJumpTable : public GenTreeIntConCommon
+{
+ ssize_t gtJumpTableAddr;
+
+ GenTreeJumpTable(var_types type
+ DEBUG_ARG(bool largeNode = false)) :
+ GenTreeIntConCommon(GT_JMPTABLE, type
+ DEBUG_ARG(largeNode))
+ {}
+#if DEBUGGABLE_GENTREE
+ GenTreeJumpTable() : GenTreeIntConCommon() {}
+#endif // DEBUGGABLE_GENTREE
+};
+#endif // !LEGACY_BACKEND
+
+/* gtIntCon -- integer constant (GT_CNS_INT) */
+struct GenTreeIntCon: public GenTreeIntConCommon
+{
+    /*
+     * This is the GT_CNS_INT struct definition.
+     * It's used to hold both int constants and pointer handle constants.
+     * For the 64-bit targets we only use GT_CNS_INT, as it is used to represent all the possible sizes.
+     * For the 32-bit targets we use a GT_CNS_LNG to hold a 64-bit integer constant and GT_CNS_INT for all others.
+     * In the future when we retarget the JIT for x86 we should consider eliminating GT_CNS_LNG.
+     */
+    ssize_t            gtIconVal;   // Must overlap and have the same offset as the gtLconVal field in GenTreeLngCon below.
+
+ /* The InitializeArray intrinsic needs to go back to the newarray statement
+ to find the class handle of the array so that we can get its size. However,
+ in ngen mode, the handle in that statement does not correspond to the compile
+ time handle (rather it lets you get a handle at run-time). In that case, we also
+ need to store a compile time handle, which goes in this gtCompileTimeHandle field.
+ */
+ ssize_t gtCompileTimeHandle;
+
+ // TODO-Cleanup: It's not clear what characterizes the cases where the field
+ // above is used. It may be that its uses and those of the "gtFieldSeq" field below
+ // are mutually exclusive, and they could be put in a union. Or else we should separate
+ // this type into three subtypes.
+
+ // If this constant represents the offset of one or more fields, "gtFieldSeq" represents that
+ // sequence of fields.
+ FieldSeqNode* gtFieldSeq;
+
+#if defined (LATE_DISASM)
+
+ /* If the constant was morphed from some other node,
+ these fields enable us to get back to what the node
+ originally represented. See use of gtNewIconHandleNode()
+ */
+
+ union
+ {
+ /* Template struct - The significant field of the other
+ * structs should overlap exactly with this struct
+ */
+
+ struct
+ {
+ unsigned gtIconHdl1;
+ void * gtIconHdl2;
+ }
+ gtIconHdl;
+
+ /* GT_FIELD, etc */
+
+ struct
+ {
+ unsigned gtIconCPX;
+ CORINFO_CLASS_HANDLE gtIconCls;
+ }
+ gtIconFld;
+ };
+#endif
+
+ GenTreeIntCon(var_types type, ssize_t value
+ DEBUG_ARG(bool largeNode = false)) :
+ GenTreeIntConCommon(GT_CNS_INT, type
+ DEBUG_ARG(largeNode)),
+ gtIconVal(value),
+ gtCompileTimeHandle(0),
+ gtFieldSeq(FieldSeqStore::NotAField())
+ {}
+
+ GenTreeIntCon(var_types type, ssize_t value, FieldSeqNode* fields
+ DEBUG_ARG(bool largeNode = false)) :
+ GenTreeIntConCommon(GT_CNS_INT, type
+ DEBUG_ARG(largeNode)),
+ gtIconVal(value),
+ gtCompileTimeHandle(0),
+ gtFieldSeq(fields)
+ {
+ assert(fields != NULL);
+ }
+
+#if DEBUGGABLE_GENTREE
+ GenTreeIntCon() : GenTreeIntConCommon() {}
+#endif
+};
+
+
+/* gtLngCon -- long constant (GT_CNS_LNG) */
+
+struct GenTreeLngCon: public GenTreeIntConCommon
+{
+    INT64 gtLconVal;   // Must overlap and have the same offset as the gtIconVal field in GenTreeIntCon above.
+ INT32 LoVal()
+ {
+ return (INT32)(gtLconVal & 0xffffffff);
+ }
+
+ INT32 HiVal()
+ {
+        return (INT32)(gtLconVal >> 32);
+ }
+
+ GenTreeLngCon(INT64 val) :
+ GenTreeIntConCommon(GT_CNS_NATIVELONG, TYP_LONG)
+ { SetLngValue(val); }
+#if DEBUGGABLE_GENTREE
+ GenTreeLngCon() : GenTreeIntConCommon() {}
+#endif
+};
+
+
+inline INT64 GenTreeIntConCommon::LngValue()
+{
+#ifndef _TARGET_64BIT_
+ assert(gtOper == GT_CNS_LNG);
+ return AsLngCon()->gtLconVal;
+#else
+ return IconValue();
+#endif
+}
+
+inline void GenTreeIntConCommon::SetLngValue(INT64 val)
+{
+#ifndef _TARGET_64BIT_
+ assert(gtOper == GT_CNS_LNG);
+ AsLngCon()->gtLconVal = val;
+#else
+ // Compile time asserts that these two fields overlap and have the same offsets: gtIconVal and gtLconVal
+ C_ASSERT(offsetof(GenTreeLngCon, gtLconVal) == offsetof(GenTreeIntCon, gtIconVal));
+ C_ASSERT(sizeof(AsLngCon()->gtLconVal) == sizeof(AsIntCon()->gtIconVal));
+
+ SetIconValue(ssize_t(val));
+#endif
+}
+
+inline ssize_t GenTreeIntConCommon::IconValue()
+{
+ assert(gtOper == GT_CNS_INT); // We should never see a GT_CNS_LNG for a 64-bit target!
+ return AsIntCon()->gtIconVal;
+}
+
+inline void GenTreeIntConCommon::SetIconValue(ssize_t val)
+{
+ assert(gtOper == GT_CNS_INT); // We should never see a GT_CNS_LNG for a 64-bit target!
+ AsIntCon()->gtIconVal = val;
+}
+
+/* gtDblCon -- double constant (GT_CNS_DBL) */
+
+struct GenTreeDblCon: public GenTree
+{
+ double gtDconVal;
+
+ bool isBitwiseEqual(GenTreeDblCon* other)
+ {
+ unsigned __int64 bits = *(unsigned __int64 *)(&gtDconVal);
+ unsigned __int64 otherBits = *(unsigned __int64 *)(&(other->gtDconVal));
+ return (bits == otherBits);
+ }
+
+ GenTreeDblCon(double val) :
+ GenTree(GT_CNS_DBL, TYP_DOUBLE),
+ gtDconVal(val)
+ {}
+#if DEBUGGABLE_GENTREE
+ GenTreeDblCon() : GenTree() {}
+#endif
+};
+
+
+/* gtStrCon -- string constant (GT_CNS_STR) */
+
+struct GenTreeStrCon: public GenTree
+{
+ unsigned gtSconCPX;
+ CORINFO_MODULE_HANDLE gtScpHnd;
+
+ // Because this node can come from an inlined method we need to
+ // have the scope handle, since it will become a helper call.
+ GenTreeStrCon(unsigned sconCPX, CORINFO_MODULE_HANDLE mod
+ DEBUG_ARG(bool largeNode = false)) :
+ GenTree(GT_CNS_STR, TYP_REF
+ DEBUG_ARG(largeNode)),
+ gtSconCPX(sconCPX), gtScpHnd(mod)
+ {}
+#if DEBUGGABLE_GENTREE
+ GenTreeStrCon() : GenTree() {}
+#endif
+};
+
+// Common supertype of LCL_VAR, LCL_FLD, REG_VAR, PHI_ARG
+// This inherits from GenTreeUnOp because lclVar stores are unary operators.
+struct GenTreeLclVarCommon: public GenTreeUnOp
+{
+private:
+ unsigned _gtLclNum; // The local number. An index into the Compiler::lvaTable array.
+ unsigned _gtSsaNum; // The SSA number.
+
+public:
+ GenTreeLclVarCommon(genTreeOps oper, var_types type, unsigned lclNum
+ DEBUG_ARG(bool largeNode = false)) :
+ GenTreeUnOp(oper, type
+ DEBUG_ARG(largeNode))
+ {
+ SetLclNum(lclNum);
+ }
+
+ unsigned GetLclNum() const
+ {
+ return _gtLclNum;
+ }
+ __declspec(property(get=GetLclNum)) unsigned gtLclNum;
+
+ void SetLclNum(unsigned lclNum)
+ {
+ _gtLclNum = lclNum;
+ _gtSsaNum = SsaConfig::RESERVED_SSA_NUM;
+ }
+
+ unsigned GetSsaNum() const
+ {
+ return _gtSsaNum;
+ }
+ __declspec(property(get=GetSsaNum)) unsigned gtSsaNum;
+
+ void SetSsaNum(unsigned ssaNum)
+ {
+ _gtSsaNum = ssaNum;
+ }
+
+ bool HasSsaName()
+ {
+ return (gtSsaNum != SsaConfig::RESERVED_SSA_NUM);
+ }
+
+#if DEBUGGABLE_GENTREE
+ GenTreeLclVarCommon() : GenTreeUnOp() {}
+#endif
+};
+
+// gtLclVar -- load/store/addr of local variable
+
+struct GenTreeLclVar: public GenTreeLclVarCommon
+{
+ IL_OFFSET gtLclILoffs; // instr offset of ref (only for debug info)
+
+ GenTreeLclVar(var_types type, unsigned lclNum, IL_OFFSET ilOffs
+ DEBUG_ARG(bool largeNode = false)) :
+ GenTreeLclVarCommon(GT_LCL_VAR, type, lclNum
+ DEBUG_ARG(largeNode)),
+ gtLclILoffs(ilOffs)
+ {}
+
+ GenTreeLclVar(genTreeOps oper, var_types type, unsigned lclNum, IL_OFFSET ilOffs
+ DEBUG_ARG(bool largeNode = false)) :
+ GenTreeLclVarCommon(oper, type, lclNum
+ DEBUG_ARG(largeNode)),
+ gtLclILoffs(ilOffs)
+ {
+ assert(OperIsLocal(oper));
+ }
+
+#if DEBUGGABLE_GENTREE
+ GenTreeLclVar() : GenTreeLclVarCommon() {}
+#endif
+};
+
+// gtLclFld -- load/store/addr of local variable field
+
+struct GenTreeLclFld: public GenTreeLclVarCommon
+{
+ unsigned gtLclOffs; // offset into the variable to access
+
+    FieldSeqNode*   gtFieldSeq;         // The sequence of field accesses that this LclFld node represents.
+
+    // Old (front-end) style constructor where load/store/addr share the same opcode.
+ GenTreeLclFld(var_types type, unsigned lclNum, unsigned lclOffs) :
+ GenTreeLclVarCommon(GT_LCL_FLD, type, lclNum),
+ gtLclOffs(lclOffs), gtFieldSeq(NULL)
+ {
+ assert(sizeof(*this) <= s_gtNodeSizes[GT_LCL_FLD]);
+ }
+
+
+ GenTreeLclFld(genTreeOps oper, var_types type, unsigned lclNum, unsigned lclOffs) :
+ GenTreeLclVarCommon(oper, type, lclNum),
+ gtLclOffs(lclOffs), gtFieldSeq(NULL)
+ {
+ assert(sizeof(*this) <= s_gtNodeSizes[GT_LCL_FLD]);
+ }
+#if DEBUGGABLE_GENTREE
+ GenTreeLclFld() : GenTreeLclVarCommon() {}
+#endif
+};
+
+struct GenTreeRegVar: public GenTreeLclVarCommon
+{
+ // TODO-Cleanup: Note that the base class GenTree already has a gtRegNum field.
+ // It's not clear exactly why a GT_REG_VAR has a separate field. When
+ // GT_REG_VAR is created, the two are identical. It appears that they may
+ // or may not remain so. In particular, there is a comment in stackfp.cpp
+ // that states:
+ //
+ // There used to be an assertion: assert(src->gtRegNum == src->gtRegVar.gtRegNum, ...)
+ // here, but there's actually no reason to assume that. AFAICT, for FP vars under stack FP,
+ // src->gtRegVar.gtRegNum is the allocated stack pseudo-register, but src->gtRegNum is the
+ // FP stack position into which that is loaded to represent a particular use of the variable.
+ //
+ // It might be the case that only for stackfp do they ever differ.
+ //
+ // The following might be possible: the GT_REG_VAR node has a last use prior to a complex
+ // subtree being evaluated. It could then be spilled from the register. Later,
+ // it could be unspilled into a different register, which would be recorded at
+ // the unspill time in the GenTree::gtRegNum, whereas GenTreeRegVar::gtRegNum
+ // is left alone. It's not clear why that is useful.
+ //
+ // Assuming there is a particular use, like stack fp, that requires it, maybe we
+ // can get rid of GT_REG_VAR and just leave it as GT_LCL_VAR, using the base class gtRegNum field.
+ // If we need it for stackfp, we could add a GenTreeStackFPRegVar type, which carries both the
+ // pieces of information, in a clearer and more specific way (in particular, with
+ // a different member name).
+ //
+
+private:
+
+ regNumberSmall _gtRegNum;
+
+public:
+
+ GenTreeRegVar(var_types type, unsigned lclNum, regNumber regNum) :
+ GenTreeLclVarCommon(GT_REG_VAR, type, lclNum
+ )
+ {
+ gtRegNum = regNum;
+ }
+
+    // The register number is stored in a small format (8 bits), but the getters return and the setters take
+    // a full-size (regNumber) value, to localize the casts here.
+
+ __declspec(property(get=GetRegNum,put=SetRegNum))
+ regNumber gtRegNum;
+
+ regNumber GetRegNum() const
+ {
+ return (regNumber) _gtRegNum;
+ }
+
+ void SetRegNum(regNumber reg)
+ {
+ _gtRegNum = (regNumberSmall) reg;
+ assert(_gtRegNum == reg);
+ }
+
+#if DEBUGGABLE_GENTREE
+ GenTreeRegVar() : GenTreeLclVarCommon() {}
+#endif
+};
+
+/* gtCast -- conversion to a different type (GT_CAST) */
+
+struct GenTreeCast: public GenTreeOp
+{
+ GenTreePtr& CastOp() { return gtOp1; }
+ var_types gtCastType;
+
+ GenTreeCast(var_types type, GenTreePtr op, var_types castType
+ DEBUG_ARG(bool largeNode = false)) :
+ GenTreeOp(GT_CAST, type, op, nullptr
+ DEBUG_ARG(largeNode)),
+ gtCastType(castType)
+ {}
+#if DEBUGGABLE_GENTREE
+ GenTreeCast() : GenTreeOp() {}
+#endif
+};
+
+
+// GT_BOX nodes are placeholders for boxed values. The "real" tree
+// for most purposes is in gtBoxOp.
+struct GenTreeBox: public GenTreeUnOp
+{
+ // An expanded helper call to implement the "box" if we don't get
+ // rid of it any other way. Must be in same position as op1.
+
+ GenTreePtr& BoxOp() { return gtOp1; }
+ // This is the statement that contains the assignment tree when the node is an inlined GT_BOX on a value
+ // type
+ GenTreePtr gtAsgStmtWhenInlinedBoxValue;
+
+ GenTreeBox(var_types type, GenTreePtr boxOp, GenTreePtr asgStmtWhenInlinedBoxValue) :
+ GenTreeUnOp(GT_BOX, type, boxOp),
+ gtAsgStmtWhenInlinedBoxValue(asgStmtWhenInlinedBoxValue)
+ {}
+#if DEBUGGABLE_GENTREE
+ GenTreeBox() : GenTreeUnOp() {}
+#endif
+};
+
+
+
+/* gtField -- data member ref (GT_FIELD) */
+
+struct GenTreeField: public GenTree
+{
+ GenTreePtr gtFldObj;
+ CORINFO_FIELD_HANDLE gtFldHnd;
+ DWORD gtFldOffset;
+#ifdef FEATURE_READYTORUN_COMPILER
+ CORINFO_CONST_LOOKUP gtFieldLookup;
+#endif
+
+ GenTreeField(var_types type) :
+ GenTree(GT_FIELD, type
+ )
+ {}
+#if DEBUGGABLE_GENTREE
+ GenTreeField() : GenTree() {}
+#endif
+};
+
+// Represents the Argument list of a call node, as a Lisp-style linked list.
+// (Originally I had hoped that this could have *only* the m_arg/m_rest fields, but it turns out
+// that enough of the GenTree mechanism is used that it makes sense just to make it a subtype. But
+// note that in many ways, this is *not* a "real" node of the tree, but rather a mechanism for
+// giving call nodes a flexible number of children. GenTreeArgList nodes never evaluate to registers,
+// for example.)
+
+// Note that while this extends GenTreeOp, it is *not* an EXOP. We don't add any new fields, and one
+// is free to allocate a GenTreeOp of type GT_LIST. If you use this type, you get the convenient Current/Rest
+// method names for the arguments.
+struct GenTreeArgList: public GenTreeOp
+{
+ GenTreePtr& Current() { return gtOp1; }
+ GenTreeArgList*& Rest() { assert(gtOp2 == NULL || gtOp2->OperGet() == GT_LIST); return *reinterpret_cast<GenTreeArgList**>(&gtOp2); }
+
+#if DEBUGGABLE_GENTREE
+ GenTreeArgList() : GenTreeOp() {}
+#endif
+
+ GenTreeArgList(GenTreePtr arg ) :
+ GenTreeOp(GT_LIST, TYP_VOID, arg, NULL)
+ {}
+
+ GenTreeArgList(GenTreePtr arg, GenTreeArgList* rest) :
+ GenTreeOp(GT_LIST, TYP_VOID, arg, rest)
+ {
+ assert (arg != NULL);
+ gtFlags |= arg->gtFlags & GTF_ALL_EFFECT;
+ if (rest != NULL) gtFlags |= rest->gtFlags & GTF_ALL_EFFECT;
+ }
+};
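+
+// For illustration, a typical walk over a GenTreeArgList (a sketch; "args" is an assumed
+// GenTreeArgList*, e.g. a call's gtCallArgs):
+//
+//     for (GenTreeArgList* list = args; list != nullptr; list = list->Rest())
+//     {
+//         GenTreePtr arg = list->Current();
+//         // ... process "arg" ...
+//     }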
+
+// There was quite a bit of confusion in the code base about which of gtOp1 and gtOp2 was the
+// 'then' and 'else' clause of a colon node. Adding these accessors, while not enforcing anything,
+// at least *allows* the programmer to be obviously correct.
+// However, these conventions seem backward.
+// TODO-Cleanup: If we could get these accessors used everywhere, then we could switch them.
+struct GenTreeColon: public GenTreeOp
+{
+ GenTreePtr& ThenNode() { return gtOp2; }
+ GenTreePtr& ElseNode() { return gtOp1; }
+
+#if DEBUGGABLE_GENTREE
+ GenTreeColon() : GenTreeOp() {}
+#endif
+
+ GenTreeColon(var_types typ, GenTreePtr thenNode, GenTreePtr elseNode) :
+ GenTreeOp(GT_COLON, typ, elseNode, thenNode)
+ {}
+};
+
+/* gtCall -- method call (GT_CALL) */
+typedef class fgArgInfo * fgArgInfoPtr;
+
+struct GenTreeCall: public GenTree
+{
+ GenTreePtr gtCallObjp; // The instance argument ('this' pointer)
+ GenTreeArgList* gtCallArgs; // The list of arguments in original evaluation order
+ GenTreeArgList* gtCallLateArgs; // On x86: The register arguments in an optimal order
+                                            // On ARM/x64: also includes any outgoing arg space arguments
+                                            //             that were evaluated into a temp LclVar
+ fgArgInfoPtr fgArgInfo;
+
+#if !FEATURE_FIXED_OUT_ARGS
+ int regArgListCount;
+ regList regArgList;
+#endif
+
+ // TODO-Throughput: Revisit this (this used to be only defined if
+ // FEATURE_FIXED_OUT_ARGS was enabled, so this makes GenTreeCall 4 bytes bigger on x86).
+ CORINFO_SIG_INFO* callSig; // Used by tail calls and to register callsites with the EE
+
+ regMaskTP gtCallRegUsedMask; // mask of registers used to pass parameters
+
+#define GTF_CALL_M_EXPLICIT_TAILCALL 0x0001 // GT_CALL -- the call is "tail" prefixed and importer has performed tail call checks
+#define GTF_CALL_M_TAILCALL 0x0002 // GT_CALL -- the call is a tailcall
+#define GTF_CALL_M_VARARGS 0x0004 // GT_CALL -- the call uses varargs ABI
+#define GTF_CALL_M_RETBUFFARG 0x0008 // GT_CALL -- first parameter is the return buffer argument
+#define GTF_CALL_M_DELEGATE_INV 0x0010 // GT_CALL -- call to Delegate.Invoke
+#define     GTF_CALL_M_NOGCCHECK               0x0020  // GT_CALL -- not a call for computing full interruptibility
+#define GTF_CALL_M_SPECIAL_INTRINSIC 0x0040 // GT_CALL -- function that could be optimized as an intrinsic
+ // in special cases. Used to optimize fast way out in morphing
+#define GTF_CALL_M_UNMGD_THISCALL 0x0080 // "this" pointer (first argument) should be enregistered (only for GTF_CALL_UNMANAGED)
+#define GTF_CALL_M_VIRTSTUB_REL_INDIRECT 0x0080 // the virtstub is indirected through a relative address (only for GTF_CALL_VIRT_STUB)
+#define GTF_CALL_M_NONVIRT_SAME_THIS 0x0080 // callee "this" pointer is equal to caller this pointer (only for GTF_CALL_NONVIRT)
+#define GTF_CALL_M_FRAME_VAR_DEATH 0x0100 // GT_CALL -- the compLvFrameListRoot variable dies here (last use)
+
+#ifndef LEGACY_BACKEND
+#define GTF_CALL_M_TAILCALL_VIA_HELPER 0x0200 // GT_CALL -- call is a tail call dispatched via tail call JIT helper.
+#endif // !LEGACY_BACKEND
+
+#if FEATURE_TAILCALL_OPT
+#define GTF_CALL_M_IMPLICIT_TAILCALL 0x0400 // GT_CALL -- call is an opportunistic tail call and importer has performed tail call checks
+#endif
+
+#define GTF_CALL_M_PINVOKE 0x0800 // GT_CALL -- call is a pinvoke. This mirrors VM flag CORINFO_FLG_PINVOKE.
+ // A call marked as Pinvoke is not necessarily a GT_CALL_UNMANAGED. For e.g.
+ // an IL Stub dynamically generated for a PInvoke declaration is flagged as
+ // a Pinvoke but not as an unmanaged call. See impCheckForPInvokeCall() to
+ // know when these flags are set.
+
+ bool IsUnmanaged() { return (gtFlags & GTF_CALL_UNMANAGED) != 0; }
+ bool NeedsNullCheck() { return (gtFlags & GTF_CALL_NULLCHECK) != 0; }
+ bool CallerPop() { return (gtFlags & GTF_CALL_POP_ARGS) != 0; }
+ bool IsVirtual() { return (gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT; }
+ bool IsVirtualStub() { return (gtFlags & GTF_CALL_VIRT_KIND_MASK) == GTF_CALL_VIRT_STUB; }
+ bool IsVirtualVtable() { return (gtFlags & GTF_CALL_VIRT_KIND_MASK) == GTF_CALL_VIRT_VTABLE; }
+ bool IsInlineCandidate() { return (gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0; }
+
+#ifndef LEGACY_BACKEND
+ // Whether the method has non-standard args (i.e. passed in R10 or R11)
+ // See fgMorphArgs() to know the call types for which non-standard args are inserted.
+ bool HasNonStandardArgs() { return IsUnmanaged() || (gtCallType == CT_INDIRECT && (IsVirtualStub() || gtCallCookie)); }
+
+    // Get the count of non-standard args.
+ int GetNonStandardArgCount()
+ {
+ if (IsUnmanaged())
+ {
+ // R11 = PInvoke cookie param
+ return 1;
+ }
+ else if (gtCallType == CT_INDIRECT)
+ {
+ if (IsVirtualStub())
+ {
+ // R11 = Virtual stub param
+ return 1;
+ }
+ else if (gtCallCookie != nullptr)
+ {
+ // R10 = PInvoke target param
+ // R11 = PInvoke cookie param
+ return 2;
+ }
+ }
+
+ return 0;
+ }
+#endif // !LEGACY_BACKEND
+
+ // Returns true if VM has flagged this method as CORINFO_FLG_PINVOKE.
+ bool IsPInvoke() { return (gtCallMoreFlags & GTF_CALL_M_PINVOKE) != 0; }
+
+    // Note that the distinction between a tail-prefixed call and an implicit tail call
+    // is maintained on a call node until fgMorphCall(), after which it will be
+    // either a tail call (i.e. IsTailCall() is true) or a non-tail call.
+ bool IsTailPrefixedCall() { return (gtCallMoreFlags & GTF_CALL_M_EXPLICIT_TAILCALL) != 0; }
+
+ // This method returning "true" implies that tail call flowgraph morhphing has
+ // performed final checks and committed to making a tail call.
+ bool IsTailCall() { return (gtCallMoreFlags & GTF_CALL_M_TAILCALL) != 0; }
+
+ // This method returning "true" implies that importer has performed tail call checks
+ // and providing a hint that this can be converted to a tail call.
+ bool CanTailCall() { return IsTailPrefixedCall() || IsImplicitTailCall(); }
+
+#ifndef LEGACY_BACKEND
+ bool IsTailCallViaHelper() { return IsTailCall() && (gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_HELPER); }
+#else // LEGACY_BACKEND
+ bool IsTailCallViaHelper() { return true; }
+#endif // LEGACY_BACKEND
+
+#if FEATURE_FASTTAILCALL
+ bool IsFastTailCall() { return IsTailCall() && !(gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_HELPER); }
+#else // !FEATURE_FASTTAILCALL
+ bool IsFastTailCall() { return false; }
+#endif // !FEATURE_FASTTAILCALL
+
+#if FEATURE_TAILCALL_OPT
+ // Returns true if this is marked for opportunistic tail calling.
+ // That is, can be tail called though not explicitly prefixed with "tail" prefix.
+ bool IsImplicitTailCall() { return (gtCallMoreFlags & GTF_CALL_M_IMPLICIT_TAILCALL) != 0; }
+#else // !FEATURE_TAILCALL_OPT
+ bool IsImplicitTailCall() { return false; }
+#endif // !FEATURE_TAILCALL_OPT
+
+ bool IsSameThis() { return (gtCallMoreFlags & GTF_CALL_M_NONVIRT_SAME_THIS) != 0; }
+ bool IsDelegateInvoke(){ return (gtCallMoreFlags & GTF_CALL_M_DELEGATE_INV) != 0; }
+ bool IsVirtualStubRelativeIndir() { return (gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT) != 0; }
+ bool IsVarargs() { return (gtCallMoreFlags & GTF_CALL_M_VARARGS) != 0; }
+
+ unsigned short gtCallMoreFlags; // in addition to gtFlags
+
+ unsigned char gtCallType :3; // value from the gtCallTypes enumeration
+ unsigned char gtReturnType :5; // exact return type
+
+ CORINFO_CLASS_HANDLE gtRetClsHnd; // The return type handle of the call if it is a struct; used for HFAs.
+
+ union
+ {
+ // only used for CALLI unmanaged calls (CT_INDIRECT)
+ GenTreePtr gtCallCookie;
+ // gtInlineCandidateInfo is only used when inlining methods
+ InlineCandidateInfo * gtInlineCandidateInfo;
+ void * gtStubCallStubAddr; // GTF_CALL_VIRT_STUB - these are never inlined
+ CORINFO_GENERIC_HANDLE compileTimeHelperArgumentHandle; // Used to track type handle argument of dynamic helpers
+ };
+
+    // The expression, evaluated after the args are placed, that determines the control target.
+ GenTree * gtControlExpr;
+
+ union
+ {
+ CORINFO_METHOD_HANDLE gtCallMethHnd; // CT_USER_FUNC
+ GenTreePtr gtCallAddr; // CT_INDIRECT
+ };
+
+#ifdef FEATURE_READYTORUN_COMPILER
+ // Call target lookup info for method call from a Ready To Run module
+ CORINFO_CONST_LOOKUP gtEntryPoint;
+#endif
+
+ GenTreeCall(var_types type) :
+ GenTree(GT_CALL, type)
+ {}
+#if DEBUGGABLE_GENTREE
+ GenTreeCall() : GenTree() {}
+#endif
+};
+
+struct GenTreeCmpXchg: public GenTree
+{
+ GenTreePtr gtOpLocation;
+ GenTreePtr gtOpValue;
+ GenTreePtr gtOpComparand;
+
+ GenTreeCmpXchg(var_types type, GenTreePtr loc, GenTreePtr val, GenTreePtr comparand) :
+ GenTree(GT_CMPXCHG, type),
+ gtOpLocation(loc), gtOpValue(val), gtOpComparand(comparand)
+ {
+ // There's no reason to do a compare-exchange on a local location, so we'll assume that all of these
+ // have global effects.
+ gtFlags |= GTF_GLOB_EFFECT;
+ }
+#if DEBUGGABLE_GENTREE
+ GenTreeCmpXchg() : GenTree() {}
+#endif
+};
+
+
+
+struct GenTreeFptrVal: public GenTree
+{
+ CORINFO_METHOD_HANDLE gtFptrMethod;
+
+#ifdef FEATURE_READYTORUN_COMPILER
+ CORINFO_CONST_LOOKUP gtEntryPoint;
+ CORINFO_CONST_LOOKUP gtDelegateCtor;
+#endif
+
+ GenTreeFptrVal(var_types type, CORINFO_METHOD_HANDLE meth) :
+ GenTree(GT_FTN_ADDR, type),
+ gtFptrMethod(meth)
+ {}
+#if DEBUGGABLE_GENTREE
+ GenTreeFptrVal() : GenTree() {}
+#endif
+};
+
+/* gtQmark */
+struct GenTreeQmark : public GenTreeOp
+{
+ // Livesets on entry to then and else subtrees
+ VARSET_TP gtThenLiveSet;
+ VARSET_TP gtElseLiveSet;
+
+ // The "Compiler*" argument is not a DEBUG_ARG here because we use it to keep track of the set of
+ // (possible) QMark nodes.
+ GenTreeQmark(var_types type, GenTreePtr cond, GenTreePtr colonOp, class Compiler* comp);
+
+#if DEBUGGABLE_GENTREE
+ GenTreeQmark() : GenTreeOp(GT_QMARK, TYP_INT, NULL, NULL) {}
+#endif
+};
+
+
+#if INLINE_MATH
+
+/* gtMath -- math intrinsic (possibly-binary op [NULL op2 is allowed] with an additional field) */
+
+struct GenTreeMath: public GenTreeOp
+{
+ CorInfoIntrinsics gtMathFN;
+
+ GenTreeMath(var_types type, GenTreePtr op1, CorInfoIntrinsics mathFN) :
+ GenTreeOp(GT_MATH, type, op1, NULL),
+ gtMathFN(mathFN)
+ {}
+
+ GenTreeMath(var_types type, GenTreePtr op1, GenTreePtr op2, CorInfoIntrinsics mathFN) :
+ GenTreeOp(GT_MATH, type, op1, op2),
+ gtMathFN(mathFN)
+ {}
+
+#if DEBUGGABLE_GENTREE
+ GenTreeMath() : GenTreeOp() {}
+#endif
+};
+
+#endif // INLINE_MATH
+
+#ifdef FEATURE_SIMD
+
+/* gtSIMD -- SIMD intrinsic (possibly-binary op [NULL op2 is allowed] with additional fields) */
+struct GenTreeSIMD: public GenTreeOp
+{
+ SIMDIntrinsicID gtSIMDIntrinsicID; // operation Id
+ var_types gtSIMDBaseType; // SIMD vector base type
+ unsigned gtSIMDSize; // SIMD vector size in bytes
+
+ GenTreeSIMD(var_types type, GenTreePtr op1, SIMDIntrinsicID simdIntrinsicID, var_types baseType, unsigned size) :
+ GenTreeOp(GT_SIMD, type, op1, nullptr),
+ gtSIMDIntrinsicID(simdIntrinsicID),
+ gtSIMDBaseType(baseType),
+ gtSIMDSize(size)
+ {}
+
+ GenTreeSIMD(var_types type, GenTreePtr op1, GenTreePtr op2, SIMDIntrinsicID simdIntrinsicID, var_types baseType, unsigned size) :
+ GenTreeOp(GT_SIMD, type, op1, op2),
+ gtSIMDIntrinsicID(simdIntrinsicID),
+ gtSIMDBaseType(baseType),
+ gtSIMDSize(size)
+ {}
+
+#if DEBUGGABLE_GENTREE
+ GenTreeSIMD() : GenTreeOp() {}
+#endif
+};
+#endif // FEATURE_SIMD
+
+/* gtIndex -- array access */
+
+struct GenTreeIndex: public GenTreeOp
+{
+ GenTreePtr& Arr() { return gtOp1; }
+ GenTreePtr& Index() { return gtOp2; }
+
+ unsigned gtIndElemSize; // size of elements in the array
+ CORINFO_CLASS_HANDLE gtStructElemClass; // If the element type is a struct, this is the struct type.
+
+ GenTreeIndex(var_types type, GenTreePtr arr, GenTreePtr ind, unsigned indElemSize) :
+ GenTreeOp(GT_INDEX, type, arr, ind),
+ gtIndElemSize(indElemSize),
+ gtStructElemClass(nullptr) // We always initialize this after construction.
+ {
+#ifdef DEBUG
+ static ConfigDWORD fJitSkipArrayBoundCheck;
+ if (fJitSkipArrayBoundCheck.val(CLRConfig::INTERNAL_JitSkipArrayBoundCheck) == 1)
+ {
+ // Skip bounds check
+ }
+ else
+#endif
+ {
+ // Do bounds check
+ gtFlags |= GTF_INX_RNGCHK;
+ }
+
+ if (type == TYP_REF)
+ {
+ gtFlags |= GTF_INX_REFARR_LAYOUT;
+ }
+
+ gtFlags |= GTF_EXCEPT|GTF_GLOB_REF;
+ }
+#if DEBUGGABLE_GENTREE
+ GenTreeIndex() : GenTreeOp() {}
+#endif
+};
+
+/* gtArrLen -- array length (GT_ARR_LENGTH)
+ GT_ARR_LENGTH is used for "arr.length" */
+
+struct GenTreeArrLen: public GenTreeUnOp
+{
+ GenTreePtr& ArrRef() { return gtOp1; } // the array address node
+private:
+ int gtArrLenOffset; // constant to add to "gtArrRef" to get the address of the array length.
+
+public:
+ inline int ArrLenOffset() {
+ return gtArrLenOffset;
+ }
+
+ GenTreeArrLen(var_types type, GenTreePtr arrRef, int lenOffset) :
+ GenTreeUnOp(GT_ARR_LENGTH, type, arrRef),
+ gtArrLenOffset(lenOffset)
+ {}
+
+#if DEBUGGABLE_GENTREE
+ GenTreeArrLen() : GenTreeUnOp() {}
+#endif
+};
+
+// This takes an array length, an index value, and the label to jump to if the index is out of range.
+// It generates no result.
+
+struct GenTreeBoundsChk: public GenTree
+{
+ GenTreePtr gtArrLen; // An expression for the length of the array being indexed.
+ GenTreePtr gtIndex; // The index expression.
+
+ GenTreePtr gtIndRngFailBB; // Label to jump to for array-index-out-of-range
+
+    /* Only out-of-range checks at the same stack depth can jump to the same label (finding the return address is easier).
+       Used for delayed calling of fgSetRngChkTarget(), so that the
+       optimizer has a chance of eliminating some of the range checks. */
+ unsigned gtStkDepth;
+
+ GenTreeBoundsChk(genTreeOps oper, var_types type, GenTreePtr arrLen, GenTreePtr index) :
+ GenTree(oper, type),
+ gtArrLen(arrLen), gtIndex(index),
+ gtIndRngFailBB(NULL),
+ gtStkDepth(0)
+ {
+ // Effects flags propagate upwards.
+ gtFlags |= (arrLen->gtFlags & GTF_ALL_EFFECT);
+ gtFlags |= GTF_EXCEPT;
+ }
+#if DEBUGGABLE_GENTREE
+ GenTreeBoundsChk() : GenTree() {}
+#endif
+
+    // If gtArrLen is really an array length, returns the array reference; otherwise returns NULL.
+ GenTreePtr GetArray()
+ {
+ if (gtArrLen->OperGet() == GT_ARR_LENGTH)
+ {
+ return gtArrLen->gtArrLen.ArrRef();
+ }
+ else
+ {
+ return NULL;
+ }
+ }
+};
+
+// gtArrElem -- general array element (GT_ARR_ELEM), for non "SZ_ARRAYS"
+// -- multidimensional arrays, or 1-d arrays with non-zero lower bounds.
+
+struct GenTreeArrElem: public GenTree
+{
+ GenTreePtr gtArrObj;
+
+ #define GT_ARR_MAX_RANK 3
+ GenTreePtr gtArrInds[GT_ARR_MAX_RANK]; // Indices
+ unsigned char gtArrRank; // Rank of the array
+
+    unsigned char   gtArrElemSize;  // !!! Caution, this is an "unsigned char", it is used only
+                                    // on the optimization path of array intrinsics.
+ // It stores the size of array elements WHEN it can fit
+ // into an "unsigned char".
+ // This has caused VSW 571394.
+ var_types gtArrElemType; // The array element type
+
+ // Requires that "inds" is a pointer to an array of "rank" GenTreePtrs for the indices.
+ GenTreeArrElem(var_types type, GenTreePtr arr, unsigned char rank, unsigned char elemSize, var_types elemType, GenTreePtr* inds) :
+ GenTree(GT_ARR_ELEM, type),
+ gtArrObj(arr), gtArrRank(rank), gtArrElemSize(elemSize), gtArrElemType(elemType)
+ {
+ for (unsigned char i = 0; i < rank; i++) gtArrInds[i] = inds[i];
+ gtFlags |= GTF_EXCEPT;
+ }
+#if DEBUGGABLE_GENTREE
+ GenTreeArrElem() : GenTree() {}
+#endif
+};
+
+//--------------------------------------------
+//
+// GenTreeArrIndex (gtArrIndex): Expression to bounds-check the index for one dimension of a
+// multi-dimensional or non-zero-based array, and compute the effective index
+// (i.e. subtracting the lower bound).
+//
+// Notes:
+// This node is similar in some ways to GenTreeBoundsChk, which ONLY performs the check.
+// The reason that this node incorporates the check into the effective index computation is
+// to avoid duplicating the codegen, as the effective index is required to compute the
+// offset anyway.
+// TODO-CQ: Enable optimization of the lower bound and length by replacing this:
+// /--* <arrObj>
+// +--* <index0>
+// +--* ArrIndex[i, ]
+// with something like:
+// /--* <arrObj>
+// /--* ArrLowerBound[i, ]
+// | /--* <arrObj>
+// +--* ArrLen[i, ] (either generalize GT_ARR_LENGTH or add a new node)
+// +--* <index0>
+// +--* ArrIndex[i, ]
+// Which could, for example, be optimized to the following when known to be within bounds:
+// /--* TempForLowerBoundDim0
+// +--* <index0>
+// +--* - (GT_SUB)
+//
+struct GenTreeArrIndex: public GenTreeOp
+{
+ // The array object - may be any expression producing an Array reference, but is likely to be a lclVar.
+ GenTreePtr& ArrObj() { return gtOp1; }
+ // The index expression - may be any integral expression.
+ GenTreePtr& IndexExpr() { return gtOp2; }
+ unsigned char gtCurrDim; // The current dimension
+ unsigned char gtArrRank; // Rank of the array
+ var_types gtArrElemType; // The array element type
+
+ GenTreeArrIndex(var_types type, GenTreePtr arrObj, GenTreePtr indexExpr,
+ unsigned char currDim, unsigned char arrRank, var_types elemType) :
+ GenTreeOp(GT_ARR_INDEX, type, arrObj, indexExpr),
+ gtCurrDim(currDim), gtArrRank(arrRank), gtArrElemType(elemType)
+ {
+ gtFlags |= GTF_EXCEPT;
+ }
+#if DEBUGGABLE_GENTREE
+protected:
+ friend GenTree;
+ // Used only for GenTree::GetVtableForOper()
+ GenTreeArrIndex() : GenTreeOp() {}
+#endif
+};
+
+// Represents either an InitBlk, InitObj, CpBlk or CpObj
+// MSIL OpCode.
+struct GenTreeBlkOp : public GenTreeOp
+{
+public:
+ // The destination for the CpBlk/CpObj/InitBlk/InitObj to copy bits to
+ GenTreePtr Dest() {
+ assert(gtOp1->gtOper == GT_LIST);
+ return gtOp1->gtOp.gtOp1;
+ }
+
+ // True if this BlkOpNode is a volatile memory operation.
+ bool IsVolatile() const { return (gtFlags & GTF_BLK_VOLATILE) != 0; }
+
+    // Instruction selection: the code sequence we will use at codegen time
+    // to encode this operation.
+ enum
+ {
+ BlkOpKindInvalid,
+ BlkOpKindHelper,
+ BlkOpKindRepInstr,
+ BlkOpKindUnroll,
+ } gtBlkOpKind;
+
+ bool gtBlkOpGcUnsafe;
+
+ GenTreeBlkOp(genTreeOps oper) :
+ GenTreeOp(oper, TYP_VOID DEBUG_ARG(true)),
+ gtBlkOpKind(BlkOpKindInvalid),
+ gtBlkOpGcUnsafe(false)
+ {
+ assert(OperIsBlkOp(oper));
+ }
+
+#if DEBUGGABLE_GENTREE
+protected:
+ friend GenTree;
+ GenTreeBlkOp() : GenTreeOp(){}
+#endif // DEBUGGABLE_GENTREE
+};
+
+// Represents a CpObj MSIL Node.
+struct GenTreeCpObj : public GenTreeBlkOp
+{
+public:
+ // The source for the CpBlk/CpObj to copy bits from
+ GenTreePtr Source() {
+ assert(gtOper == GT_COPYOBJ && gtOp1->gtOper == GT_LIST);
+ return gtOp1->gtOp.gtOp2;
+ }
+
+ // In the case of CopyObj, this is the class token that represents the type that is being copied.
+ GenTreePtr ClsTok() { return gtOp2; }
+
+ // If non-null, this array represents the gc-layout of the class that is being copied
+ // with CpObj.
+ BYTE* gtGcPtrs;
+
+ // If non-zero, this is the number of slots in the class layout that
+ // contain gc-pointers.
+ unsigned gtGcPtrCount;
+
+ // If non-zero, the number of pointer-sized slots that constitutes the class token in CpObj.
+ unsigned gtSlots;
+
+ GenTreeCpObj(unsigned gcPtrCount, unsigned gtSlots, BYTE* gtGcPtrs) :
+ GenTreeBlkOp(GT_COPYOBJ),
+ gtGcPtrs(gtGcPtrs),
+ gtGcPtrCount(gcPtrCount),
+ gtSlots(gtSlots){ }
+
+#if DEBUGGABLE_GENTREE
+protected:
+ friend GenTree;
+ GenTreeCpObj() : GenTreeBlkOp(),
+ gtGcPtrs(nullptr),
+ gtGcPtrCount(0),
+ gtSlots(0) {}
+#endif // DEBUGGABLE_GENTREE
+};
+
+// Represents either an InitBlk or InitObj MSIL OpCode.
+struct GenTreeInitBlk : public GenTreeBlkOp
+{
+public:
+
+ // The value used to fill the destination buffer.
+ GenTreePtr InitVal() { assert(gtOp1->gtOper == GT_LIST);
+ return gtOp1->gtOp.gtOp2; }
+
+ // The size of the buffer to be copied.
+ GenTreePtr Size() { return gtOp2; }
+
+ GenTreeInitBlk() : GenTreeBlkOp(GT_INITBLK){}
+
+#if DEBUGGABLE_GENTREE
+protected:
+ friend GenTree;
+#endif // DEBUGGABLE_GENTREE
+};
+
+// Represents a CpBlk MSIL OpCode, or a CpObj whose type contains no GC pointers.
+struct GenTreeCpBlk : public GenTreeBlkOp
+{
+public:
+
+    // The source for the CpBlk/CpObj to copy bits from.
+ GenTreePtr Source() { assert(gtOp1->gtOper == GT_LIST);
+ return gtOp1->gtOp.gtOp2; }
+
+ // The size of the buffer to be copied.
+ GenTreePtr Size() { return gtOp2; }
+
+ GenTreeCpBlk() : GenTreeBlkOp(GT_COPYBLK){}
+
+#if DEBUGGABLE_GENTREE
+protected:
+ friend GenTree;
+#endif // DEBUGGABLE_GENTREE
+};
+
+//--------------------------------------------
+//
+// GenTreeArrOffset (gtArrOffset): Expression to compute the accumulated offset for the address
+// of an element of a multi-dimensional or non-zero-based array.
+//
+// Notes:
+// The result of this expression is (gtOffset * dimSize) + gtIndex
+// where dimSize is the length/stride/size of the dimension, and is obtained from gtArrObj.
+// This node is generated in conjunction with the GenTreeArrIndex node, which computes the
+// effective index for a single dimension. The sub-trees can be separately optimized, e.g.
+// within a loop body where the expression for the 0th dimension may be invariant.
+//
+// Here is an example of how the tree might look for a two-dimension array reference:
+// /--* const 0
+// | /--* <arrObj>
+// | +--* <index0>
+// +--* ArrIndex[i, ]
+// +--* <arrObj>
+// /--| arrOffs[i, ]
+// | +--* <arrObj>
+// | +--* <index1>
+// +--* ArrIndex[*,j]
+// +--* <arrObj>
+// /--| arrOffs[*,j]
+// TODO-CQ: see comment on GenTreeArrIndex for how its representation may change. When that
+// is done, we will also want to replace the <arrObj> argument to arrOffs with the
+// ArrLen as for GenTreeArrIndex.
+//
+struct GenTreeArrOffs: public GenTree
+{
+ GenTreePtr gtOffset; // The accumulated offset for lower dimensions - must be TYP_I_IMPL, and
+ // will either be a CSE temp, the constant 0, or another GenTreeArrOffs node.
+    GenTreePtr      gtIndex;        // The effective index for the current dimension - must be non-negative
+                                    // and can be any expression (though it is likely to be either a GenTreeArrIndex
+                                    // node, a lclVar, or a constant).
+ GenTreePtr gtArrObj; // The array object - may be any expression producing an Array reference,
+ // but is likely to be a lclVar.
+ unsigned char gtCurrDim; // The current dimension
+ unsigned char gtArrRank; // Rank of the array
+ var_types gtArrElemType; // The array element type
+
+ GenTreeArrOffs(var_types type, GenTreePtr offset, GenTreePtr index, GenTreePtr arrObj,
+ unsigned char currDim, unsigned char rank, var_types elemType) :
+ GenTree(GT_ARR_OFFSET, type), gtOffset(offset), gtIndex(index), gtArrObj(arrObj),
+ gtCurrDim(currDim), gtArrRank(rank), gtArrElemType(elemType)
+ {
+ assert(index->gtFlags & GTF_EXCEPT);
+ gtFlags |= GTF_EXCEPT;
+ }
+#if DEBUGGABLE_GENTREE
+ GenTreeArrOffs() : GenTree() {}
+#endif
+};
+
+/* gtAddrMode -- Target-specific canonicalized addressing expression (GT_LEA) */
+
+struct GenTreeAddrMode: public GenTreeOp
+{
+ // Address is Base + Index*Scale + Offset.
+ // These are the legal patterns:
+ //
+ // Base // Base != nullptr && Index == nullptr && Scale == 0 && Offset == 0
+ // Base + Index*Scale // Base != nullptr && Index != nullptr && Scale != 0 && Offset == 0
+ // Base + Offset // Base != nullptr && Index == nullptr && Scale == 0 && Offset != 0
+ // Base + Index*Scale + Offset // Base != nullptr && Index != nullptr && Scale != 0 && Offset != 0
+ // Index*Scale // Base == nullptr && Index != nullptr && Scale > 1 && Offset == 0
+ // Index*Scale + Offset // Base == nullptr && Index != nullptr && Scale > 1 && Offset != 0
+ // Offset // Base == nullptr && Index == nullptr && Scale == 0 && Offset != 0
+ //
+ // So, for example:
+ // 1. Base + Index is legal with Scale==1
+    //  2. If Index is null, Scale should be zero (or uninitialized / unused)
+ // 3. If Scale==1, then we should have "Base" instead of "Index*Scale", and "Base + Offset" instead of "Index*Scale + Offset".
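+    //
+    // For illustration, on the xarch targets these patterns correspond directly to addressing
+    // modes (a sketch; the registers shown are arbitrary examples):
+    //
+    //     Base                        ->  [rax]
+    //     Base + Offset               ->  [rax + 16]
+    //     Base + Index*Scale          ->  [rax + rcx*8]
+    //     Base + Index*Scale + Offset ->  [rax + rcx*8 + 16]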
+
+ // First operand is base address/pointer
+ bool HasBase() const { return gtOp1 != nullptr; }
+ GenTreePtr& Base() { return gtOp1; }
+
+ // Second operand is scaled index value
+ bool HasIndex() const { return gtOp2 != nullptr; }
+ GenTreePtr& Index() { return gtOp2; }
+
+ unsigned gtScale; // The scale factor
+ unsigned gtOffset; // The offset to add
+
+ GenTreeAddrMode(var_types type, GenTreePtr base, GenTreePtr index,
+ unsigned scale, unsigned offset) :
+ GenTreeOp(GT_LEA, type, base, index )
+ {
+ gtScale = scale;
+ gtOffset = offset;
+ }
+#if DEBUGGABLE_GENTREE
+protected:
+ friend GenTree;
+ // Used only for GenTree::GetVtableForOper()
+ GenTreeAddrMode() : GenTreeOp() {}
+#endif
+};
+
+// Indir is just an op, no additional data, but some additional abstractions
+struct GenTreeIndir: public GenTreeOp
+{
+ // like an assign, op1 is the destination
+ GenTreePtr& Addr() { return gtOp1; }
+
+    // These methods provide an interface to the components of the indirection's address expression.
+ bool HasBase();
+ bool HasIndex();
+ GenTree* Base();
+ GenTree* Index();
+ unsigned Scale();
+ size_t Offset();
+
+ GenTreeIndir(genTreeOps oper, var_types type, GenTree *addr, GenTree *data) :
+ GenTreeOp(oper, type, addr, data)
+ {
+ }
+
+#if DEBUGGABLE_GENTREE
+protected:
+ friend GenTree;
+ // Used only for GenTree::GetVtableForOper()
+ GenTreeIndir() : GenTreeOp() {}
+#endif
+};
+
+// StoreInd is just a BinOp, no additional data
+struct GenTreeStoreInd: public GenTreeIndir
+{
+ GenTreePtr& Data() { return gtOp2; }
+
+ GenTreeStoreInd(var_types type, GenTree *destPtr, GenTree *data) :
+ GenTreeIndir(GT_STOREIND, type, destPtr, data)
+ {
+ }
+
+#if DEBUGGABLE_GENTREE
+protected:
+ friend GenTree;
+ // Used only for GenTree::GetVtableForOper()
+ GenTreeStoreInd() : GenTreeIndir() {}
+#endif
+};
+
+
+/* gtRetExp -- Placeholder for the return expression from an inline candidate (GT_RET_EXPR) */
+
+struct GenTreeRetExpr: public GenTree
+{
+ GenTreePtr gtInlineCandidate;
+
+#ifdef _TARGET_ARM_
+ CORINFO_CLASS_HANDLE gtRetClsHnd;
+#endif
+
+ GenTreeRetExpr(var_types type) :
+ GenTree(GT_RET_EXPR, type)
+ {}
+#if DEBUGGABLE_GENTREE
+ GenTreeRetExpr() : GenTree() {}
+#endif
+};
+
+
+/* gtStmt -- 'statement expr' (GT_STMT) */
+
+struct GenTreeStmt: public GenTree
+{
+ GenTreePtr gtStmtExpr; // root of the expression tree
+ GenTreePtr gtStmtList; // first node (for forward walks)
+
+ inlExpPtr gtInlineExpList; // The inline expansion list of this statement.
+ // This is a list of CORINFO_METHOD_HANDLEs
+ // that shows the history of inline expansion
+ // which leads to this statement.
+
+#if defined(DEBUGGING_SUPPORT) || defined(DEBUG)
+ IL_OFFSETX gtStmtILoffsx; // instr offset (if available)
+#endif
+
+#ifdef DEBUG
+ IL_OFFSET gtStmtLastILoffs;// instr offset at end of stmt
+#endif
+
+ bool gtStmtIsTopLevel()
+ {
+ return (gtFlags & GTF_STMT_TOP_LEVEL) != 0;
+ }
+
+ bool gtStmtIsEmbedded()
+ {
+ return !gtStmtIsTopLevel();
+ }
+
+    // Returns the next statement if it is embedded; otherwise, returns nullptr.
+ GenTreeStmt* gtStmtNextIfEmbedded()
+ {
+ GenTree* nextStmt = gtNext;
+ if (nextStmt != nullptr && nextStmt->gtStmt.gtStmtIsEmbedded())
+ {
+ return nextStmt->AsStmt();
+ }
+ else
+ {
+ return nullptr;
+ }
+ }
+
+ GenTree* gtStmtNextTopLevelStmt()
+ {
+ GenTree* nextStmt = gtNext;
+ while (nextStmt != nullptr && nextStmt->gtStmt.gtStmtIsEmbedded())
+ {
+ nextStmt = nextStmt->gtNext;
+ }
+ return nextStmt;
+ }
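+
+    // For illustration, walking only the top-level statements of a block might look like this
+    // (a sketch; "block->bbTreeList" as the head of the statement list is an assumption):
+    //
+    //     GenTree* list = block->bbTreeList;
+    //     for (GenTreeStmt* stmt = (list != nullptr) ? list->AsStmt() : nullptr;
+    //          stmt != nullptr;
+    //          stmt = stmt->gtNextStmt)
+    //     {
+    //         if (stmt->gtStmtIsEmbedded())
+    //             continue;          // skip embedded statements
+    //         // ... process top-level "stmt" ...
+    //     }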
+
+ __declspec(property(get=getNextStmt))
+ GenTreeStmt* gtNextStmt;
+
+ __declspec(property(get=getPrevStmt))
+ GenTreeStmt* gtPrevStmt;
+
+ GenTreeStmt* getNextStmt()
+ {
+ if (gtNext == nullptr)
+ return nullptr;
+ else
+ return gtNext->AsStmt();
+ }
+
+ GenTreeStmt* getPrevStmt()
+ {
+ if (gtPrev == nullptr)
+ return nullptr;
+ else
+ return gtPrev->AsStmt();
+ }
+
+ GenTreeStmt(GenTreePtr expr, IL_OFFSETX offset)
+ : GenTree(GT_STMT, TYP_VOID)
+ , gtStmtExpr(expr)
+ , gtStmtList(nullptr)
+ , gtInlineExpList(nullptr)
+#if defined(DEBUGGING_SUPPORT) || defined(DEBUG)
+ , gtStmtILoffsx(offset)
+#endif
+#ifdef DEBUG
+ , gtStmtLastILoffs(BAD_IL_OFFSET)
+#endif
+ {
+ // Statements can't have statements as part of their expression tree.
+ assert(expr->gtOper != GT_STMT);
+
+ gtFlags |= GTF_STMT_TOP_LEVEL;
+
+ // Set the statement to have the same costs as the top node of the tree.
+ // This is used long before costs have been assigned, so we need to copy
+ // the raw costs.
+ CopyRawCosts(expr);
+ }
+
+#if DEBUGGABLE_GENTREE
+ GenTreeStmt() : GenTree(GT_STMT, TYP_VOID) {}
+#endif
+};
+
+/* gtLdObj -- 'push object' (GT_LDOBJ). */
+
+struct GenTreeLdObj: public GenTreeUnOp
+{
+ CORINFO_CLASS_HANDLE gtClass; // object being loaded
+ GenTreePtr * gtFldTreeList; // The list of trees that represents the fields of this struct
+
+ GenTreeLdObj(var_types type, GenTreePtr op, CORINFO_CLASS_HANDLE cls) :
+ GenTreeUnOp(GT_LDOBJ, type, op),
+ gtClass(cls), gtFldTreeList(NULL)
+ {
+ gtFlags |= GTF_GLOB_REF; // A LdObj is always a global reference.
+ }
+#if DEBUGGABLE_GENTREE
+ GenTreeLdObj() : GenTreeUnOp() {}
+#endif
+};
+
+
+/* NOTE: Any tree nodes that are larger than 8 bytes (two ints or
+ pointers) must be flagged as 'large' in GenTree::InitNodeSize().
+ */
+
+
+/* gtClsVar -- 'static data member' (GT_CLS_VAR) */
+
+struct GenTreeClsVar: public GenTree
+{
+ CORINFO_FIELD_HANDLE gtClsVarHnd;
+ FieldSeqNode* gtFieldSeq;
+
+ GenTreeClsVar(var_types type, CORINFO_FIELD_HANDLE clsVarHnd, FieldSeqNode* fldSeq) :
+ GenTree(GT_CLS_VAR, type),
+ gtClsVarHnd(clsVarHnd),
+ gtFieldSeq(fldSeq)
+ {
+ gtFlags |= GTF_GLOB_REF;
+ }
+#if DEBUGGABLE_GENTREE
+ GenTreeClsVar() : GenTree() {}
+#endif
+};
+
+/* gtArgPlace -- 'register argument placeholder' (GT_ARGPLACE) */
+
+struct GenTreeArgPlace: public GenTree
+{
+ CORINFO_CLASS_HANDLE gtArgPlaceClsHnd; // Needed when we have a TYP_STRUCT argument
+
+ GenTreeArgPlace(var_types type, CORINFO_CLASS_HANDLE clsHnd) :
+ GenTree(GT_ARGPLACE, type),
+ gtArgPlaceClsHnd(clsHnd)
+ {}
+#if DEBUGGABLE_GENTREE
+ GenTreeArgPlace() : GenTree() {}
+#endif
+};
+
+/* gtLabel -- code label target (GT_LABEL) */
+
+struct GenTreeLabel: public GenTree
+{
+ BasicBlock* gtLabBB;
+
+ GenTreeLabel(BasicBlock* bb) :
+ GenTree(GT_LABEL, TYP_VOID),
+ gtLabBB(bb)
+ {}
+#if DEBUGGABLE_GENTREE
+ GenTreeLabel() : GenTree() {}
+#endif
+};
+
+/* gtPhiArg -- phi node rhs argument, var = phi(phiarg, phiarg, phiarg...); GT_PHI_ARG */
+struct GenTreePhiArg: public GenTreeLclVarCommon
+{
+ BasicBlock * gtPredBB;
+
+ GenTreePhiArg(var_types type, unsigned lclNum, unsigned snum, BasicBlock* block)
+ : GenTreeLclVarCommon(GT_PHI_ARG, type, lclNum)
+ , gtPredBB(block)
+ {
+ SetSsaNum(snum);
+ }
+
+#if DEBUGGABLE_GENTREE
+ GenTreePhiArg() : GenTreeLclVarCommon() {}
+#endif
+};
+
+/* gtPutArgStk -- Argument passed on stack */
+
+struct GenTreePutArgStk: public GenTreeUnOp
+{
+ unsigned gtSlotNum; // Slot number of the argument to be passed on stack
+
+#if FEATURE_FASTTAILCALL
+    bool putInIncomingArgArea;    // Whether this arg needs to be placed in the incoming arg area.
+                                  // By default this is false and the arg is placed in the outgoing arg area.
+                                  // Fast tail calls set this to true.
+                                  // In the future, if we need to add more such bool fields, consider bit fields.
+
+ GenTreePutArgStk(genTreeOps oper, var_types type, unsigned slotNum, bool _putInIncomingArgArea = false
+ DEBUG_ARG(GenTreePtr callNode = NULL) DEBUG_ARG(bool largeNode = false)) :
+ GenTreeUnOp(oper, type DEBUG_ARG(largeNode)),
+ gtSlotNum(slotNum), putInIncomingArgArea(_putInIncomingArgArea)
+ {
+#ifdef DEBUG
+ gtCall = callNode;
+#endif
+ }
+
+
+ GenTreePutArgStk(genTreeOps oper, var_types type, GenTreePtr op1, unsigned slotNum, bool _putInIncomingArgArea = false
+ DEBUG_ARG(GenTreePtr callNode = NULL) DEBUG_ARG(bool largeNode = false)) :
+ GenTreeUnOp(oper, type, op1 DEBUG_ARG(largeNode)),
+ gtSlotNum(slotNum), putInIncomingArgArea(_putInIncomingArgArea)
+ {
+#ifdef DEBUG
+ gtCall = callNode;
+#endif
+ }
+
+#else // !FEATURE_FASTTAILCALL
+
+ GenTreePutArgStk(genTreeOps oper, var_types type, unsigned slotNum
+ DEBUG_ARG(GenTreePtr callNode = NULL) DEBUG_ARG(bool largeNode = false)) :
+ GenTreeUnOp(oper, type DEBUG_ARG(largeNode)),
+ gtSlotNum(slotNum)
+ {
+#ifdef DEBUG
+ gtCall = callNode;
+#endif
+ }
+
+
+ GenTreePutArgStk(genTreeOps oper, var_types type, GenTreePtr op1, unsigned slotNum
+ DEBUG_ARG(GenTreePtr callNode = NULL) DEBUG_ARG(bool largeNode = false)) :
+ GenTreeUnOp(oper, type, op1 DEBUG_ARG(largeNode)),
+ gtSlotNum(slotNum)
+ {
+#ifdef DEBUG
+ gtCall = callNode;
+#endif
+ }
+#endif // FEATURE_FASTTAILCALL
+
+#ifdef DEBUG
+ GenTreePtr gtCall; // the call node to which this argument belongs
+#endif
+
+#if DEBUGGABLE_GENTREE
+ GenTreePutArgStk() : GenTreeUnOp() {}
+#endif
+};
+
+// Deferred inline functions of GenTree -- these need the subtypes above to
+// be defined already.
+
+inline bool GenTree::IsZero()
+{
+ if ((gtOper == GT_CNS_INT) && (gtIntConCommon.IconValue() == 0))
+ return true;
+
+ if ((gtOper == GT_CNS_LNG) && (gtIntConCommon.LngValue() == 0))
+ return true;
+
+ if ((gtOper == GT_CNS_DBL) && (gtDblCon.gtDconVal == 0.0))
+ return true;
+ return false;
+}
+
+inline bool GenTree::IsBoxedValue()
+{
+ assert(gtOper != GT_BOX || gtBox.BoxOp() != NULL);
+ return (gtOper == GT_BOX) && (gtFlags & GTF_BOX_VALUE);
+}
+
+inline GenTreePtr GenTree::MoveNext()
+{
+ assert(IsList());
+ return gtOp.gtOp2;
+}
+
+inline GenTreePtr GenTree::Current()
+{
+ assert(IsList());
+ return gtOp.gtOp1;
+}
+
+inline GenTreePtr *GenTree::pCurrent()
+{
+ assert(IsList());
+ return &(gtOp.gtOp1);
+}
+
+inline GenTreePtr GenTree::gtGetOp1()
+{
+ return gtOp.gtOp1;
+}
+
+#ifdef DEBUG
+/* static */
+inline bool GenTree::RequiresNonNullOp2(genTreeOps oper)
+{
+ switch (oper)
+ {
+ case GT_ADD:
+ case GT_SUB:
+ case GT_MUL:
+ case GT_DIV:
+ case GT_MOD:
+ case GT_UDIV:
+ case GT_UMOD:
+ case GT_OR:
+ case GT_XOR:
+ case GT_AND:
+ case GT_LSH:
+ case GT_RSH:
+ case GT_RSZ:
+ case GT_INDEX:
+ case GT_ASG:
+ case GT_ASG_ADD:
+ case GT_ASG_SUB:
+ case GT_ASG_MUL:
+ case GT_ASG_DIV:
+ case GT_ASG_MOD:
+ case GT_ASG_UDIV:
+ case GT_ASG_UMOD:
+ case GT_ASG_OR:
+ case GT_ASG_XOR:
+ case GT_ASG_AND:
+ case GT_ASG_LSH:
+ case GT_ASG_RSH:
+ case GT_ASG_RSZ:
+ case GT_EQ:
+ case GT_NE:
+ case GT_LT:
+ case GT_LE:
+ case GT_GE:
+ case GT_GT:
+ case GT_COMMA:
+ case GT_QMARK:
+ case GT_COLON:
+ case GT_MKREFANY:
+ case GT_INITBLK:
+ case GT_COPYBLK:
+ return true;
+ default:
+ return false;
+ }
+}
+#endif // DEBUG
+
+inline GenTreePtr GenTree::gtGetOp2()
+{
+ /* gtOp.gtOp2 is only valid for GTK_BINOP nodes. */
+
+ GenTreePtr op2 = OperIsBinary() ? gtOp.gtOp2 : nullptr;
+
+ // This documents the genTreeOps for which gtOp.gtOp2 cannot be nullptr.
+    // This helps prefix in its analysis of code that calls gtGetOp2().
+
+ assert((op2 != nullptr) || !RequiresNonNullOp2(gtOper));
+
+ return op2;
+}
+
+inline GenTreePtr GenTree::gtEffectiveVal(bool commaOnly)
+{
+ switch (gtOper)
+ {
+ case GT_COMMA:
+ return gtOp.gtOp2->gtEffectiveVal(commaOnly);
+
+ case GT_NOP:
+ if (!commaOnly && gtOp.gtOp1 != NULL)
+ return gtOp.gtOp1->gtEffectiveVal();
+ break;
+
+ default:
+ break;
+ }
+
+ return this;
+}
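+
+// For illustration, gtEffectiveVal() lets callers look through COMMA (and trivial NOP) wrappers
+// (a sketch; "tree" is an assumed GenTreePtr):
+//
+//     // For a tree of the form COMMA(sideEffect, actualValue), this yields "actualValue":
+//     GenTreePtr value = tree->gtEffectiveVal();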
+
+inline GenTree* GenTree::gtSkipReloadOrCopy()
+{
+ // There can be only one reload or copy (we can't have a reload/copy of a reload/copy)
+ if (gtOper == GT_RELOAD || gtOper == GT_COPY)
+ {
+ assert(gtGetOp1()->OperGet() != GT_RELOAD && gtGetOp1()->OperGet() != GT_COPY);
+ return gtGetOp1();
+ }
+ return this;
+}
+
+inline bool GenTree::IsCnsIntOrI() const
+{
+ return (gtOper == GT_CNS_INT);
+}
+
+inline bool GenTree::IsIntCnsFitsInI32()
+{
+#ifdef _TARGET_64BIT_
+ return IsCnsIntOrI() && ((int)gtIntConCommon.IconValue() == gtIntConCommon.IconValue());
+#else // _TARGET_64BIT_
+ return IsCnsIntOrI();
+#endif // _TARGET_64BIT_
+}
+
+inline bool GenTree::IsCnsFltOrDbl() const
+{
+ return OperGet() == GT_CNS_DBL;
+}
+
+inline bool GenTree::IsCnsNonZeroFltOrDbl()
+{
+ if (OperGet() == GT_CNS_DBL)
+ {
+ double constValue = gtDblCon.gtDconVal;
+ return *(__int64*)&constValue != 0;
+ }
+
+ return false;
+}
+
+inline bool GenTree::IsHelperCall() { return OperGet() == GT_CALL && gtCall.gtCallType == CT_HELPER; }
+
+inline var_types GenTree::CastFromType() { return this->gtCast.CastOp()->TypeGet(); }
+inline var_types& GenTree::CastToType() { return this->gtCast.gtCastType; }
+
+
+/*****************************************************************************/
+
+#ifndef _WIN64
+#include <poppack.h>
+#endif
+
+/*****************************************************************************/
+
+#if SMALL_TREE_NODES
+
+// In debug, on some platforms (e.g., when LATE_DISASM is defined), GenTreeIntCon is bigger than GenTreeLclFld.
+const
+size_t TREE_NODE_SZ_SMALL = max(sizeof(GenTreeIntCon), sizeof(GenTreeLclFld));
+
+#endif // SMALL_TREE_NODES
+
+const
+size_t TREE_NODE_SZ_LARGE = sizeof(GenTreeCall);
+
+/*****************************************************************************
+ * Types returned by GenTree::lvaLclVarRefs()
+ */
+
+enum varRefKinds
+{
+ VR_INVARIANT = 0x00, // an invariant value
+ VR_NONE = 0x00,
+ VR_IND_REF = 0x01, // an object reference
+ VR_IND_SCL = 0x02, // a non-object reference
+ VR_GLB_VAR = 0x04, // a global (clsVar)
+};
+// Add a temp define to avoid merge conflict.
+#define VR_IND_PTR VR_IND_REF
+
+/*****************************************************************************/
+#endif // _GENTREE_H_
+/*****************************************************************************/
+